diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index c0e1544d43..0de92695f8 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -4,10 +4,11 @@ on:
issue_comment:
types: [created]
pull_request_target:
- types: [opened,closed,synchronize]
+ types: [opened,closed,synchronize]
jobs:
cla-check:
+ if: github.repository_owner == 'mlcommons'
runs-on: ubuntu-latest
steps:
- name: "MLCommons CLA bot check"
@@ -22,7 +23,7 @@ jobs:
path-to-signatures: 'cla-bot/v1/cla.json'
# branch should not be protected
branch: 'main'
- allowlist: user1,bot*
+ allowlist: user1,mlcommons-bot,bot*
remote-organization-name: mlcommons
remote-repository-name: systems
diff --git a/.github/workflows/test-cmx-image-classification-onnx.yml b/.github/workflows/test-cmx-image-classification-onnx.yml
index de8250525b..7ff5d8e6a5 100644
--- a/.github/workflows/test-cmx-image-classification-onnx.yml
+++ b/.github/workflows/test-cmx-image-classification-onnx.yml
@@ -1,7 +1,7 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-name: 'CMX: image classification with ONNX'
+name: 'Image classification with ONNX via CMX'
on:
pull_request:
@@ -28,8 +28,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python3 -m pip install cmind
- cmx pull repo --url=https://github.com/mlcommons/cm4mlops
+ python3 -m pip install cmx4mlops
cmx test core
- name: Test image classification with ONNX
run: |
diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
new file mode 100644
index 0000000000..38fd814545
--- /dev/null
+++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
@@ -0,0 +1,66 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: MLPerf inference Bert (deepsparse, tf, onnxruntime, pytorch) via CMX
+
+on:
+ pull_request_target:
+ branches: [ "main", "master", "dev"]
+ paths:
+ - '.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml'
+ - '**'
+ - '!**.md'
+
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ # 3.12 didn't work on 20240305 - need to check
+ python-version: [ "3.11" ]
+ backend: [ "deepsparse", "tf", "onnxruntime", "pytorch" ]
+ precision: [ "int8", "fp32" ]
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ exclude:
+ - backend: tf
+ - backend: pytorch
+ - backend: onnxruntime
+ - precision: fp32
+ - os: windows-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Configure git longpaths (Windows)
+ if: matrix.os == 'windows-latest'
+ run: |
+ git config --system core.longpaths true
+ - name: Install dependencies
+ run: |
+ python3 -m pip install cmx4mlops
+ - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }}
+ if: matrix.os == 'windows-latest'
+ run: |
+ cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 --v --quiet
+ - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }}
+ if: matrix.os != 'windows-latest'
+ run: |
+ cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 --v --quiet
+ - name: Push Test MLPerf Results (only for cTuning dev branches)
+ if: github.repository_owner == 'ctuning'
+ env:
+ USER: "GitHub Action"
+ EMAIL: "admin@cTuning.org"
+ GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }}
+ run: |
+ git config --global user.name "${{ env.USER }}"
+ git config --global user.email "${{ env.EMAIL }}"
+ git config --global credential.https://github.com.helper ""
+ git config --global credential.https://github.com.helper "!gh auth git-credential"
+ git config --global credential.https://gist.github.com.helper ""
+ git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
+ cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from Bert GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml
new file mode 100644
index 0000000000..2b05eebd95
--- /dev/null
+++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml
@@ -0,0 +1,66 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: 'MLPerf inference ResNet50 via CMX'
+
+on:
+ pull_request_target:
+ branches: [ "main", "master", "dev"]
+ paths:
+ - '.github/workflows/test-cmx-mlperf-inference-resnet50.yml'
+ - '**'
+ - '!**.md'
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+ env:
+ CM_INDEX: "on"
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ python-version: [ "3.12" ]
+ backend: [ "onnxruntime", "tf" ]
+ implementation: [ "python", "cpp" ]
+ exclude:
+ - backend: tf
+ implementation: cpp
+ - os: macos-latest
+ backend: tf
+ - os: windows-latest
+ implementation: cpp
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Configure git longpaths (Windows)
+ if: matrix.os == 'windows-latest'
+ run: |
+ git config --system core.longpaths true
+ - name: Install dependencies
+ run: |
+ python3 -m pip install cmx4mlops
+ - name: Test MLPerf Inference ResNet50 (Windows)
+ if: matrix.os == 'windows-latest'
+ run: |
+ cmx run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 --quiet
+ - name: Test MLPerf Inference ResNet50 (Linux/macOS)
+ if: matrix.os != 'windows-latest'
+ run: |
+ cmx run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 --v --quiet
+ - name: Push Test MLPerf Results (only for cTuning dev branches)
+ if: github.repository_owner == 'ctuning'
+ env:
+ USER: "GitHub Action"
+ EMAIL: "admin@cTuning.org"
+ GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }}
+ run: |
+ git config --global user.name "${{ env.USER }}"
+ git config --global user.email "${{ env.EMAIL }}"
+ git config --global credential.https://github.com.helper ""
+ git config --global credential.https://github.com.helper "!gh auth git-credential"
+ git config --global credential.https://gist.github.com.helper ""
+ git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
+ cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml
new file mode 100644
index 0000000000..bc6bd8070c
--- /dev/null
+++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml
@@ -0,0 +1,51 @@
+name: MLPerf inference R-GAT via CMX
+
+on:
+ pull_request:
+ branches: [ "main", "master", "dev" ]
+ paths:
+ - '.github/workflows/test-cmx-mlperf-inference-rgat.yml'
+ - '**'
+ - '!**.md'
+
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest]
+ python-version: [ "3.12" ]
+ backend: [ "pytorch" ]
+ implementation: [ "python" ]
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Configure git longpaths (Windows)
+ if: matrix.os == 'windows-latest'
+ run: |
+ git config --system core.longpaths true
+ - name: Install dependencies
+ run: |
+ python3 -m pip install cmx4mlops
+ - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }}
+ run: |
+ cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet --v --target_qps=1
+ - name: Push Test MLPerf Results (only for cTuning dev branches)
+ if: github.repository_owner == 'ctuning'
+ env:
+ USER: "GitHub Action"
+ EMAIL: "admin@cTuning.org"
+ GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }}
+ run: |
+ git config --global user.name "${{ env.USER }}"
+ git config --global user.email "${{ env.EMAIL }}"
+ git config --global credential.https://github.com.helper ""
+ git config --global credential.https://github.com.helper "!gh auth git-credential"
+ git config --global credential.https://gist.github.com.helper ""
+ git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
+ cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R-GAT GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-cmx.yml b/.github/workflows/test-cmx.yml
new file mode 100644
index 0000000000..e66f37b4a2
--- /dev/null
+++ b/.github/workflows/test-cmx.yml
@@ -0,0 +1,52 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CMX test
+
+on:
+ pull_request:
+ branches: [ "master", "main", "dev" ]
+ paths:
+ - '.github/workflows/test-cmx.yml'
+ - 'cm/**'
+ - '!cm/**.md'
+
+jobs:
+ build:
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+ on: [ubuntu-latest, windows-latest, macos-latest]
+ runs-on: "${{ matrix.on }}"
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies and test cm pull repo
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install flake8 pytest
+ pip install -r requirements.txt
+ python -m pip install --ignore-installed --verbose pip setuptools
+ python -m pip install cmx4mlops
+ python -m cmind
+ cm init
+ - name: Test
+ run: |
+ python tests/test_cm.py
+ cd && mkdir tmp_cm_repo
+ cd tmp_cm_repo && cm init repo
+ cmx add script tmp_cm_repo:my-test-script --tags=test,script
+ cmx add script .:my-test-script2 --tags=test2,script
+ cd && cmx add repo my-test-repo
+ cmx add script my-test-repo:my-test-script --tags=test,script
+ cd $HOME/CM/repos/my-test-repo && cmx add script .:my-test-script2 --tags=test2,script
+
diff --git a/HISTORY.CM.md b/HISTORY.CM.md
index ce9a2e43ed..7099169b46 100644
--- a/HISTORY.CM.md
+++ b/HISTORY.CM.md
@@ -1,4 +1,4 @@
-This document narrates the history of the creation and design of CM and CM4MLOps (also known as CK2)
+This document narrates the history of the creation and design of CM, CM4MLOps and MLPerf automations (also known as CK2)
by [Grigori Fursin](https://cKnowledge.org/gfursin). It also highlights the donation of this open-source technology to MLCommons,
aimed at benefiting the broader community and fostering its ongoing development as a collaborative, community-driven initiative:
diff --git a/README.md b/README.md
index 69fa3296ec..aa88af22e7 100755
--- a/README.md
+++ b/README.md
@@ -11,14 +11,14 @@
## Collective Knowledge
-[Collective Knowledge (CK, CM, CM4MLOps, CM4MLPerf and CMX)](https://cKnowledge.org)
+[Collective Knowledge (CK)](https://cKnowledge.org)
is an educational community project to learn how to run AI, ML and other emerging workloads
in the most efficient and cost-effective way across diverse models, data sets, software and hardware:
[ [white paper](https://arxiv.org/abs/2406.16791) ].
It includes the following sub-projects.
-### Collective Minds (CM)
+### Collective Mind (CM)
The Collective Mind (CM) project, or Collective Minds, facilitates the
decomposition of complex software systems into portable, reusable, and
@@ -140,6 +140,11 @@ Copyright (c) 2014-2021 cTuning foundation
* [Grigori Fursin](https://cKnowledge.org/gfursin) (FlexAI, cTuning)
+## Maintainers
+
+* CM, CM4MLOps and MLPerf automations: MLCommons
+* CMX (the next generation of CM): Grigori Fursin
+
## Long-term vision
To learn more about the motivation behind CK and CM technology, please explore the following presentations:
diff --git a/cm/cmind/__init__.py b/cm/cmind/__init__.py
index a90f5ed8e3..3c2df12659 100644
--- a/cm/cmind/__init__.py
+++ b/cm/cmind/__init__.py
@@ -9,7 +9,7 @@
# White paper: https://arxiv.org/abs/2406.16791
# Project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
-__version__ = "3.5.3"
+__version__ = "3.5.3.1"
from cmind.core import access
from cmind.core import x
diff --git a/cm/cmind/core.py b/cm/cmind/core.py
index 3f5e9cbcfa..9f56005f47 100644
--- a/cm/cmind/core.py
+++ b/cm/cmind/core.py
@@ -1125,6 +1125,7 @@ def _x(self, i, control):
print (' -raise - raise Python error when automation action fails')
print (' -time - print elapsed time for a given automation')
print (' -profile - profile a given automation')
+ print (' -i - print info about available memory and disk space')
print (' -repro - record various info to the cmx-repro directory to replay CMX command')
print ('')
print ('Check https://github.com/mlcommons/ck/tree/master/cm/docs/cmx for more details.')
diff --git a/cmx4mlops/README.md b/cmx4mlops/README.md
new file mode 100644
index 0000000000..a0990367ef
--- /dev/null
+++ b/cmx4mlops/README.md
@@ -0,0 +1 @@
+TBD
diff --git a/cmx4mlops/cmx4mlops/repo/.gitignore b/cmx4mlops/cmx4mlops/repo/.gitignore
new file mode 100644
index 0000000000..96523fae44
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/.gitignore
@@ -0,0 +1,20 @@
+build
+build/*
+MANIFEST
+*.pyc
+__pycache__
+develop-eggs/
+dist/
+eggs/
+.eggs/
+lib/
+lib64/
+sdist/
+wheels/
+.cache/
+.coverage
+htmlcov
+*tmp/
+*tmp-ck-*/
+local/cache/
+
diff --git a/cmx4mlops/cmx4mlops/repo/CHANGES.md b/cmx4mlops/cmx4mlops/repo/CHANGES.md
new file mode 100644
index 0000000000..8d7b12267e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/CHANGES.md
@@ -0,0 +1,247 @@
+### 20241123
+ [November 2024 release of cm4mlops](https://github.com/mlcommons/cm4mlops/discussions/590)
+
+### 20240927
+ * added "test dummy" script to test Docker containers
+ * added more standard Nvidia Docker configuration for PyTorch
+ * added better support to select Docker configurations via UID
+
+### 20240916
+ * fixed "cm add script"
+
+### 20240429
+ * Added `cm set cfg` automation. For example, we can make CM scripts silent by default
+ using `cm set cfg default --key.script.silent`
+ * Added key `script_entry_repo_to_report_errors` to CM script meta
+ to customize where to report errors instead of the default repo.
+ For example, issues with the CM script `run-mlperf-inference-app`
+ should be reported at https://github.com/mlcommons/inference .
+ * Added saving of the run script (resolved from deps, runnable without CM) to tmp-run-without-cm.bat.
+ Example: `cmr "app image corner-detection" --debug-script-tags=benchmark,program`
+ * Generate Docker container sample during --repro (prototype)
+
+
+### 20240427
+ * Added cm run script --print_versions to print resolved versions of dependencies at the end of the run
+
+### 20240426
+ * Improved cm run script --repro function to dump dependencies, versions and generate README
+
+### 20240425
+ * CM script automation: fixed dumping versions (variations didn't have _ prefix)
+
+---
+
+Since March 2023, all updates to CM automations are submitted via PRs.
+You can follow our PRs at
+* https://github.com/ctuning/mlcommons-ck/commits/master
+* https://github.com/mlcommons/ck/pulls?q=is%3Apr+is%3Aclosed .
+
+---
+
+### 20230214
+ * experiment and graph gui are working now
+
+### 20230206:
+ * started prototyping cm run experiment
+
+### 20230123:
+ * added simple GUI to CM scripts
+
+### 20221206:
+ * added "script_name" to the CM "script" meta to specify any native script name
+ * added "--script_name" to "cm add script {alias} --script_name=my-native-script.sh"
+
+### 20221206:
+ * added CM_SCRIPT_EXTRA_CMD to force some flags to all scripts
+
+### 20221202:
+ * major updates for Windows (CL, CUDA, etc)
+
+### 20221111:
+ * various fixes for Student Cluster Competition at SuperComputing'22
+
+### 20221110:
+ * added support to push MLPerf results to W&B dashboard
+
+### 20221103:
+ * added "cm json2yaml utils" and "cm yaml2json utils"
+
+### 20221024:
+ * added --verbose and --time to "cm run script"
+
+### 20221017:
+ * removed the need for echo-off script
+
+### 20221010:
+ * added cm run script --debug-script-tags to run cmd/bash before native script
+ * added cm run script --shell to set env and run shell after script execution
+
+### 20221007:
+ * added script template (used when adding new scripts)
+ * major clean up of all scripts
+
+### 20220916:
+ * treat alias as tags if spaces:
+ cm run script "get compiler" is converted to cm run script --tags=get,compiler
+ * improved gcc detection
+ * refactored "cm run script" to skip deps in cache if needed
+
+### 20220906
+ * added --print_env flag to "cm run script" to print aggregated env
+ before running native scripts
+ * various fixes to support MLPerf automation
+
+### 20220823
+ * various fixes for universal MLPerf inference submission automation
+
+### 20220803
+ * various fixes for TVM and image classification
+
+### 20220802
+ * added "run_script_after_post_deps" to script meta to run script after post deps
+ (useful to activate python virtual env)
+ * added "activate-python-venv" script to make it easier to debug Python deps installation
+
+### 20220722
+ * added --accept-license and --skip-system-deps
+ (converted to env CM_ACCEPT_LICENSE ("True") and CM_SKIP_SYSTEM_DEPS ("True"))
+
+### 20220719
+ * moved relatively stable MLOps automation scripts here
+
+### 20220718
+ * fixed local_env_keys in get-python3
+ * added new_env_only_keys to meta to specify which env to keep
+ * fixed problem with adding tags from the selected script during caching
+ * added --skip-compile and --skip-run to script (converted to env CM_SKIP_COMPILE and CM_SKIP_RUN)
+ * fixed local_env_keys in get-python3
+ * added new_env_only_keys to get-python3
+
+### 20220713
+ * added local_env_keys to meta
+ * added "env" dict to os_info
+
+### 20220712
+ * major script refactoring to support cache tags update from deps
+ * fixed version min/max propagations in deps
+ * improvements to support tags from deps
+ * added tags from deps (python, llvm)
+
+### 20220708
+ * various fixes to handle versions (min/max/default)
+ * various fixes to avoid contamination of ENV from other scripts
+ * various fixes to handle versions (min/max/default)
+
+### 20220705
+ * fixes for remembered selections
+ * added --skip-remembered-selections to "cm run script"
+
+### 20220704
+ * fixed a bug with searching for scripts with variations
+ * added the possibility to update deps from pre/post processing
+ * added --extra-cache-tags and --name for "cm run script"
+ * added prototype of selection caching
+ * fixed get-python-venv
+
+### 20220701
+ * added dummy "cm test script"
+ * added "--env" to "cm show cache" to show env and state
+ * added "cm show cache"
+
+### 20220629
+ * added "detect_version_using_script" in script used to detect python packages
+ * major fix to properly support multiple scripts with the same tags, caching, selection, etc
+ * fixed a bug in version comparison (converting string to int)
+ * added recording of "version" to cache meta
+
+### 20220628
+ * fixed local_env with deps
+
+### 20220623
+ * important update of versions logic
+
+### 20220621
+ * added support for --quiet
+ * changed CM_NEED_VERSION to CM_VERSION
+ * added CM_VERSION_MIN, CM_VERSION_MAX
+ * added cm compare_versions utils --version1=... --version2=...
+ * added support to detect min/max/correct versions
+
+### 20220617
+ * fixed logic to handle variations (-_): https://github.com/mlcommons/ck/issues/243
+
+### 20220616
+ * changed "cached" to "cache" automation
+
+### 20220615
+ * major update of script (remove parallel env/new_env and state/new_state).
+ keep global env & state and detect changes automatically
+ * major simplification of "script"
+ * removed "installed" to be more understandable
+ * added "cached" to be more understandable
+
+### 20220609
+ * added "versions" key to the CM script meta
+ it works similar to "variations" and is forced by --version
+ * changed "ic" to "script" in "experiment" automation
+
+### 20220608
+ * updated "variations" logic in "script"!
+ meta['default_variation'] (str): only one of many
+ meta['default_variations'] (list): multiple choices
+ * deprecated "ic" automation. Use "script" instead!
+
+### 20220607
+ * added strip_folders to utils/unzip_file
+ * fixed minor bugs in CM script
+
+### 20220606
+ * added "name" key to deps (list of names and UIDs)
+ * added "add_deps_tags" in variations and in CMD ({"name":"tag(s)"})
+ * added "deps" to variations to be merged with the list of current deps
+ * added --input and --output for cm run script converted to env CM_INPUT and CM_OUTPUT
+ useful to create interactive CM scripts to process files
+ * Added prototype-test-deps-variations-tags to play with deps, variations, tags
+
+### 20220605
+ * clean tmp files in "script" automation by default and keep them using --dirty flag
+
+### 20220603
+ * added "skip" and "deps" to postprocess to call other scripts.
+ For example call install LLVM if detect LLVM fails...
+ * added "script" automation to substitute less intuitive "ic"
+ * Improved LLVM detection and installation
+ * Added example of image corner detection
+ * Added updated script entries
+
+### 20220601
+ * added version, path, skip_install and post_deps to IC
+ * added --new to IC to detect new components
+ * Updating mechanisms to install and/or detect LLVM
+ * added support to install prebuilt LLVM for Linux, MacOs, Windows
+
+### 20220530
+ * updated ic automation to read tmp-run-state.json
+ and merge it with the "new_state" dict
+
+### 20220524
+ * changed directory ck2-repo-mlops to cm-devops
+
+### 20220517
+ * Changed CM_PATH_LIST to +PATH
+ * Added general support for +ENV that is expanded to ENV=val1;val2;...:${ENV}
+
+### 20220511
+ * Better handle exceptions in utils.download_file
+ * Added support for variations in intelligent components (ic)
+ * Fixed bugs in IC
+ * Added "_" prefix in tags to specify variation of IC
+ * Record env.sh in "installed" artifacts even if bat file is not executed
+ * Fixed experiment directory naming on Windows
+ * Added "cm version ic" (#233)
+ * Added prototype of ic::prototype-get-ml-model-resnet50-onnx with variations
+ * Added prototype of ic::prototype-get-imagenet-val with variations
+ * Added prototype of ic::prototype-get-imagenet-aux with variations
+ * Added prototype of ic::prototype-get-llvm
+ * Added prototype of ic::prototype-get-tvm
diff --git a/cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md b/cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md
new file mode 100644
index 0000000000..79d407acdf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md
@@ -0,0 +1 @@
+https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
diff --git a/cmx4mlops/cmx4mlops/repo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/COPYRIGHT.md
new file mode 100644
index 0000000000..5aa453ab8c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/COPYRIGHT.md
@@ -0,0 +1,5 @@
+# Copyright Notice
+
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/HISTORY.md b/cmx4mlops/cmx4mlops/repo/HISTORY.md
new file mode 100644
index 0000000000..4921bc0b9b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/HISTORY.md
@@ -0,0 +1,127 @@
+This document narrates the history of the creation and design of CM, CM4MLOps and MLPerf automations (also known as CK2)
+by [Grigori Fursin](https://cKnowledge.org/gfursin). It also highlights the donation of this open-source technology to MLCommons,
+aimed at benefiting the broader community and fostering its ongoing development as a collaborative, community-driven initiative:
+
+* Jan 28, 2021: After delivering an invited ACM TechTalk'21 about the Collective Knowledge framework (CK1)
+ and reproducibility initiatives for conferences, as well as CK-MLOps and MLPerf automations,
+ Grigori received useful feedback and suggestions for improvements to workflow automations:
+ https://learning.acm.org/techtalks/reproducibility.
+
+ Following this, Grigori began prototyping CK2 (later CM) to streamline CK1, CK-MLOps and MLPerf benchmarking.
+ The goal was to dramatically simplify CK1 workflows by introducing just a few core and portable automations,
+ which eventually evolved into `CM script` and `CM cache`.
+
+ At that time, the cTuning foundation hosted CK1 and all the prototypes for the CM framework at https://github.com/ctuning/ck:
+ [ref1](https://github.com/mlcommons/ck/commit/9e57934f4999db23052531e92160772ab831463a),
+ [ref2](https://github.com/mlcommons/ck/tree/9e57934f4999db23052531e92160772ab831463a),
+ [ref3](https://github.com/mlcommons/ck/tree/9e57934f4999db23052531e92160772ab831463a/incubator).
+
+* Sep 23, 2021: donated CK1, CK-MLOps, MLPerf automations and early prototypes of CM from the cTuning repository to MLCommons:
+ [ref1](https://web.archive.org/web/20240803140223/https://octo.ai/blog/octoml-joins-the-community-effort-to-democratize-mlperf-inference-benchmarking),
+ [ref2](https://github.com/mlcommons/ck/tree/228f80b0bf44610c8244ff0c3f6bec5bbd25aa6c/incubator),
+ [ref3](https://github.com/mlcommons/ck/tree/695c3843fd8121bbdde6c453cd6ec9503986b0c6?tab=readme-ov-file#author-and-coordinator),
+ [ref4](https://github.com/mlcommons/ck/tree/master/ck),
+ [ref5](https://github.com/mlcommons/ck-mlops).
+
+ Prepared MLCommons proposal for the creation of the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md),
+ aimed at fostering community-driven support for CK and CM developments to benefit everyone.
+
+* Jan, 2022: hired Arjun Suresh at OctoML to support and maintain CK1 framework and help prepare OctoML's MLPerf submissions using CK1.
+ Meanwhile, transitioned to focusing on CM and CM-MLOps development, building upon the prototypes created in 2021.
+
+* Mar 1, 2022: started developing cm-mlops: [ref](https://github.com/octoml/cm-mlops/commit/0ae94736a420dfa84f7417fc62d323303b8760c6).
+
+* Mar 24, 2022: after successfully stabilizing the initial prototype of CM, donated it to MLCommons to benefit the entire community:
+ [ref1](https://github.com/mlcommons/ck/tree/c7918ad544f26b6c499c2fc9c07431a9640fca5a/ck2),
+ [ref2](https://github.com/mlcommons/ck/tree/c7918ad544f26b6c499c2fc9c07431a9640fca5a/ck2#coordinators),
+ [ref3](https://github.com/mlcommons/ck/commit/3c146cb3c75a015363f7a96758adf6dcc43032d6),
+ [ref4](https://github.com/mlcommons/ck/commit/3c146cb3c75a015363f7a96758adf6dcc43032d6#diff-d97f0f6f5a32f16d6ed18b9600ffc650f7b25512685f7a2373436c492c6b52b3R48).
+
+* Apr 6, 2022: started transitioning previous MLOps and MLPerf automations from the mlcommons/ck-mlops format
+ to the new CM format using the cm-mlops repository (will be later renamed to cm4mlops):
+ [ref1](https://github.com/octoml/cm-mlops/commit/d1efdc30fb535ce144020d4e88f3ed768c933176),
+ [ref2](https://github.com/octoml/cm-mlops/blob/d1efdc30fb535ce144020d4e88f3ed768c933176/CONTRIBUTIONS).
+
+* Apr 22, 2022: began architecting "Intelligent Components" in the CM-MLOps repository,
+ which will be renamed to `CM Script` at a later stage:
+ [ref1](https://github.com/octoml/cm-mlops/commit/b335c609c47d2c547afe174d9df232652d57f4f8),
+ [ref2](https://github.com/octoml/cm-mlops/tree/b335c609c47d2c547afe174d9df232652d57f4f8),
+ [ref3](https://github.com/octoml/cm-mlops/blob/b335c609c47d2c547afe174d9df232652d57f4f8/CONTRIBUTIONS).
+
+ At the same time, prototyped other core CM automations, including IC, Docker, and Experiment:
+ [ref1](https://github.com/octoml/cm-mlops/tree/b335c609c47d2c547afe174d9df232652d57f4f8/automation),
+ [ref2](https://github.com/mlcommons/ck/commits/master/?before=7f66e2438bfe21b4ce2d08326a5168bb9e3132f6+7001).
+
+* Apr 28, 2022: donated CM-MLOps to MLCommons, which was later renamed to CM4MLOps:
+ [ref](https://github.com/mlcommons/ck/commit/456e4861056c0e39c4d689c03da91f90a44be058).
+
+* May 9, 2022: developed the initial set of core IC automations for MLOps (aka CM scripts):
+ [ref1](https://github.com/octoml/cm-mlops/commit/4a4a027f4088ce7e7abcec29c39d98981bf09d4c),
+ [ref2](https://github.com/octoml/cm-mlops/tree/4a4a027f4088ce7e7abcec29c39d98981bf09d4c),
+ [ref3](https://github.com/octoml/cm-mlops/blob/7692240becd6397a96c3975388913ea082002e7a/CONTRIBUTIONS).
+
+* May 11, 2022: After successfully prototyping CM and CM-MLOps, deprecated the CK1 framework in favor of CM.
+ Transferred Arjun Suresh to the CM project as a maintainer and tester for CM and CM-MLOps:
+ [ref](https://github.com/octoml/cm-mlops/blob/17405833665bc1e93820f9ff76deb28a0f543bdb/CONTRIBUTIONS).
+
+ Created a [file](https://github.com/mlcommons/ck/blob/master/cm-mlops/CHANGES.md)
+ to document and track our public developments at MLCommons.
+
+* Jun 8, 2022: renamed the 'IC' automation to the more intuitive 'CM script' automation.
+ [ref1](https://github.com/mlcommons/ck/tree/5ca4e2c33e58a660ac20a545d8aa5143ab6e8e81/cm-devops/automation/script),
+ [ref2](https://github.com/mlcommons/ck/tree/5ca4e2c33e58a660ac20a545d8aa5143ab6e8e81),
+ [ref3](https://github.com/octoml/cm-mlops/commit/7910fb7ffc62a617d987d2f887d6f9981ff80187).
+
+* Jun 16, 2022: prototyped the `CM cache` automation to facilitate caching and reuse of the outputs from CM scripts:
+ [ref1](https://github.com/mlcommons/ck/commit/1f81aae8cebd5567ec4ca55f693beaf32b49fb48),
+ [ref2](https://github.com/mlcommons/ck/tree/1f81aae8cebd5567ec4ca55f693beaf32b49fb48),
+ [ref3](https://github.com/mlcommons/ck/tree/1f81aae8cebd5567ec4ca55f693beaf32b49fb48?tab=readme-ov-file#contacts).
+
+* Sep 6, 2022: delivered CM demo to run MLPerf while deprecating CK1 automations for MLPerf:
+ [ref1](https://github.com/mlcommons/ck/commit/2c5d5c5c944ae5f252113c62af457c7a4c5e877a#diff-faac2c4ecfd0bfb928dafc938d3dad5651762fbb504a2544752a337294ee2573R224),
+ [ref2](https://github.com/mlcommons/ck/blob/2c5d5c5c944ae5f252113c62af457c7a4c5e877a/CONTRIBUTING.md#author-and-coordinator).
+
+ Welcomed Arjun Suresh as a contributor to CM automations for MLPerf: [ref](https://github.com/mlcommons/ck/blob/2c5d5c5c944ae5f252113c62af457c7a4c5e877a/CONTRIBUTING.md#contributors-in-alphabetical-order).
+
+* From September 2022: coordinated community development of CM and CM4MLOps
+ to [modularize and automate MLPerf benchmarks](https://docs.mlcommons.org/inference)
+ and support [reproducibility initiatives at ML and Systems conferences](https://cTuning.org/ae)
+ through the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md).
+
+ * Directed and financed the creation of CM automations to streamline the MLPerf power measurement processes.
+
+ * Proposed to use MLPerf benchmarks for the Student Cluster Competition, led the developments
+ and prepared a tutorial to run MLPerf inference at SCC'22 via CM: [ref](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md)
+
+* April 2023: departed OctoML to focus on the development of the [CK playground](https://access.cKnowledge.org) and CM automations
+ to make MLPerf accessible to everyone. Hired Arjun Suresh to help with developments.
+
+ * Initiated and funded development of the [MLPerf explorer](https://github.com/ctuning/q2a-mlperf-visualizer)
+ to improve visualization of results
+
+* August 2023: organized the 1st mass-scale MLPerf community submission of 12217 inference benchmark v3.1 results
+ out of a total of 13351 results (including 90% of all power results) across diverse models, software and hardware
+ from different vendors via [open challenges](https://access.cknowledge.org/playground/?action=challenges) funded by cTuning.org:
+ [LinkedIn article](https://www.linkedin.com/pulse/new-milestone-make-mlperf-benchmarks-accessible-everyone-fursin/)
+ with results visualized by the [MLPerf explorer](https://github.com/ctuning/q2a-mlperf-visualizer),
+ [CM4MLOps challenges at GitHub](https://github.com/mlcommons/cm4mlops/tree/main/challenge).
+
+* February 2024: proposed to use CM to automate the [MLPerf automotive benchmark (ABTF)](https://mlcommons.org/working-groups/benchmarks/automotive/).
+
+ * moved my prototypes of the CM automation for ABTF to cm4abtf repo: [ref](https://github.com/mlcommons/cm4abtf/commit/f92b9f464de89a38a4bde149290dede2d94c8631)
+ * led further CM4ABTF developments funded by cTuning.org.
+
+* Starting in April 2024, began the gradual transfer of ongoing maintenance and enhancement
+ responsibilities for CM and CM4MLOps, including MLPerf automations, to MLCommons.
+ Welcomed Anandhu Sooraj as a maintainer and contributor to CM4MLOps with MLPerf automations.
+
+* Took a break from all development activities.
+
+* July 2024: started prototyping the next generation of CM (CMX and CMX4MLOps) with simpler interfaces
+ based on user feedback while maintaining backward compatibility.
+
+* 2025: continue developing CMX and CMX4MLOps to make it easier to run and customize MLPerf inference, training
+ and other benchmarks across diverse models, datasets, software and hardware.
+
+For more details, please refer to the [white paper](https://arxiv.org/abs/2406.16791)
+and the [ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339).
diff --git a/cmx4mlops/cmx4mlops/repo/LICENSE.md b/cmx4mlops/cmx4mlops/repo/LICENSE.md
new file mode 100644
index 0000000000..f433b1a53f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/LICENSE.md
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md b/cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md
new file mode 100644
index 0000000000..faa0084585
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md
@@ -0,0 +1 @@
+This CM repository may contain CM scripts with third-party files licensed under the Apache 2.0, BSD or MIT licenses.
diff --git a/cmx4mlops/cmx4mlops/repo/README.md b/cmx4mlops/cmx4mlops/repo/README.md
index e69de29bb2..49bd226a87 100644
--- a/cmx4mlops/cmx4mlops/repo/README.md
+++ b/cmx4mlops/cmx4mlops/repo/README.md
@@ -0,0 +1,67 @@
+## Unified and cross-platform CM interface for DevOps, MLOps and MLPerf
+
+[](LICENSE.md)
+[](https://github.com/mlcommons/ck/tree/master/cm/cmind)
+[](https://pypi.org/project/cmind)
+[](https://pepy.tech/project/cm4mlops)
+
+[](https://github.com/mlcommons/cm4mlops/actions/workflows/test-cm-script-features.yml)
+[](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml)
+[](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml)
+[](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-abtf-poc.yml)
+[](https://github.com/mlcommons/cm4mlops/actions/workflows/test-qaic-compute-sdk-build.yml)
+[](https://github.com/mlcommons/cm4mlops/actions/workflows/test-qaic-software-kit.yml)
+
+
+# CM4MLOps repository
+
+The **CM4MLOps** repository is powered by the [Collective Mind automation framework](https://github.com/mlcommons/ck/tree/master/cm),
+a [Python package](https://pypi.org/project/cmind/) with a CLI and API designed for creating and managing automations.
+
+Two key automations developed using CM are **Script** and **Cache**, which streamline machine learning (ML) workflows,
+including managing Docker runs. Both Script and Cache automations are part of the **cm4mlops** repository.
+
+The CM scripts, also housed in this repository, consist of hundreds of modular Python-wrapped scripts accompanied
+by `yaml` metadata, enabling the creation of robust and flexible ML workflows.
+
+- **CM Scripts Documentation**: [https://docs.mlcommons.org/cm4mlops/](https://docs.mlcommons.org/cm4mlops/)
+- **CM CLI Documentation**: [https://docs.mlcommons.org/ck/specs/cm-cli/](https://docs.mlcommons.org/ck/specs/cm-cli/)
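+
+For example, the same script functionality can be reached from the CLI (`cm run script --tags=...`)
+or from the CM Python API. Below is a minimal sketch, assuming the `cmind` package is installed
+and the scripts from this repository are registered with CM; the `detect,os` tags are used purely
+for illustration:
+
+```python
+import cmind
+
+# Run a CM script selected by its tags; 'out':'con' streams output to the console
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'detect,os',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    # CM reports failures via a return dict instead of raising exceptions
+    cmind.error(r)
+```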
+
+The `mlperf-branch` of the **cm4mlops** repository is dedicated to developments specific to MLPerf Inference.
+Please submit any pull requests (PRs) to this branch. For more information about using CM for MLPerf Inference,
+refer to the [MLPerf Inference Documentation](https://docs.mlcommons.org/inference/).
+
+## License
+
+[Apache 2.0](LICENSE.md)
+
+## Copyright
+
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and encourage collaborative development.
+
+## Maintainer(s)
+
+* MLCommons
+
+## CM author
+
+[Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## CM concepts
+
+Check our [ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) and the [white paper](https://arxiv.org/abs/2406.16791).
+
+## CM script developers
+
+Arjun Suresh, Anandhu Sooraj, Grigori Fursin
+
+## Parent project
+
+Visit the [parent Collective Knowledge project](https://github.com/mlcommons/ck) for further details.
+
+## Citing this project
+
+If you find the CM automations helpful, please cite this article:
+[ [ArXiv](https://arxiv.org/abs/2406.16791) ]
diff --git a/cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md
new file mode 100644
index 0000000000..84d2741794
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md
@@ -0,0 +1,71 @@
+[ [Back to index](../../../docs/README.md) ]
+
+# CM "cache" automation
+
+*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md)
+ and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts.*
+
+## CM CLI
+
+Whenever a [CM script](https://access.cknowledge.org/playground/?action=scripts)
+caches its output (such as a downloaded model, a pre-processed data set, or built code),
+you can find it using the CM "cache" automation as follows:
+
+```bash
+cm show cache
+```
+
+You can filter cache entries by tags and variations:
+```bash
+cm show cache --tags=ml-model
+cm show cache --tags=python
+```
+
+You can find a path to a given cache artifact as follows:
+```bash
+cm find cache --tags=ml-model,bert
+```
+
+You can delete one or more cache artifacts as follows:
+```bash
+cm rm cache --tags=ml-model
+```
+
+You can skip the user prompt by adding the `-f` flag as follows:
+```bash
+cm rm cache --tags=ml-model -f
+```
+
+You can clean the whole cache as follows:
+```bash
+cm rm cache -f
+```
+
+## CM python API
+
+You can access the same functionality via CM Python API as follows:
+
+```python
+
+import cmind
+
+output = cmind.access({'action':'show',
+ 'automation':'cache,541d6f712a6b464e'})
+
+if output['return']>0:
+ cmind.error(output)
+
+artifacts = output['list']
+
+for artifact in artifacts:
+ print ('')
+ print (artifact.path)
+ print (artifact.meta)
+
+```
+
+## Related
+
+* [CM "script" automation](../script/README-extra.md)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/README.md b/cmx4mlops/cmx4mlops/repo/automation/cache/README.md
new file mode 100644
index 0000000000..0a3114d3b5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/README.md
@@ -0,0 +1,87 @@
+*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15))
+ * CM CLI with UID: ```cm test cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'cache,541d6f712a6b464e',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### show
+
+ * CM CLI: ```cm show cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54))
+ * CM CLI with UID: ```cm show cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'show',
+ 'automation':'cache,541d6f712a6b464e',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### search
+
+ * CM CLI: ```cm search cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153))
+ * CM CLI with UID: ```cm search cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'search',
+ 'automation':'cache,541d6f712a6b464e',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### copy_to_remote
+
+ * CM CLI: ```cm copy_to_remote cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186))
+ * CM CLI with UID: ```cm copy_to_remote cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'copy_to_remote',
+ 'automation':'cache,541d6f712a6b464e',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
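+
+For reference, here is how one of the fragments above might look when assembled into a complete,
+runnable call (a sketch assuming the `cmind` package is installed):
+
+```python
+import cmind
+
+# Show all cache entries; extra flags from the module API go into this dict
+r = cmind.access({'action': 'show',
+                  'automation': 'cache,541d6f712a6b464e',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```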
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json
new file mode 100644
index 0000000000..ac383f937c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json
@@ -0,0 +1,12 @@
+{
+ "alias": "cache",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "desc": "Caching cross-platform CM scripts",
+ "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)",
+ "sort": 900,
+ "tags": [
+ "automation"
+ ],
+ "uid": "541d6f712a6b464e"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/module.py b/cmx4mlops/cmx4mlops/repo/automation/cache/module.py
new file mode 100644
index 0000000000..885778800c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/module.py
@@ -0,0 +1,249 @@
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
+
+ ############################################################
+ def show(self, i):
+ """
+ Show cache
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (env) (bool): if True, show env from cm-cached-state.json
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+ import json
+
+ # Check parsed automation
+ if 'parsed_automation' not in i:
+ return {'return': 1, 'error': 'automation is not specified'}
+
+ console = i.get('out') == 'con'
+
+ show_env = i.get('env', False)
+
+# Moved to search function
+# # Check simplified CMD: cm show cache "get python"
+# # If artifact has spaces, treat them as tags!
+# artifact = i.get('artifact','')
+# tags = i.get('tags','').strip()
+# if ' ' in artifact or ',' in artifact:
+# del(i['artifact'])
+# if 'parsed_artifact' in i: del(i['parsed_artifact'])
+#
+# new_tags = artifact.replace(' ',',')
+# tags = new_tags if tags=='' else new_tags+','+tags
+#
+# i['tags'] = tags
+
+ # Find CM artifact(s)
+ i['out'] = None
+ r = self.search(i)
+
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+ for artifact in sorted(lst, key=lambda x: sorted(x.meta['tags'])):
+ # for artifact in lst:
+ path = artifact.path
+ meta = artifact.meta
+ dependent_cached_path = meta.get(
+ 'dependent_cached_path', '')
+ if dependent_cached_path and not os.path.exists(
+ dependent_cached_path):
+ continue
+
+ original_meta = artifact.original_meta
+
+ alias = meta.get('alias', '')
+ uid = meta.get('uid', '')
+
+ tags = meta.get('tags', [])
+ tags1 = sorted([x for x in tags if not x.startswith('_')])
+ tags2 = sorted([x for x in tags if x.startswith('_')])
+ tags = tags1 + tags2
+
+ version = meta.get('version', '')
+
+ if console:
+ print('')
+# print ('* UID: {}'.format(uid))
+ print('* Tags: {}'.format(','.join(tags)))
+ print(' Path: {}'.format(path))
+ if version != '':
+ print(' Version: {}'.format(version))
+
+ if show_env and console:
+ path_to_cached_state_file = os.path.join(
+ path, 'cm-cached-state.json')
+
+ if os.path.isfile(path_to_cached_state_file):
+ r = utils.load_json(file_name=path_to_cached_state_file)
+ if r['return'] > 0:
+ return r
+
+ # Update env and state from cache!
+ cached_state = r['meta']
+
+ new_env = cached_state.get('new_env', {})
+ if len(new_env) > 0:
+ print(' New env:')
+ print(json.dumps(new_env, indent=6, sort_keys=True).replace('{', '').replace('}', ''))
+
+ new_state = cached_state.get('new_state', {})
+ if len(new_state) > 0:
+ print(' New state:')
+ print(json.dumps(new_state, indent=6, sort_keys=True))
+
+ return {'return': 0, 'list': lst}
+
+ ############################################################
+ def search(self, i):
+ """
+ Overriding the automation search function to support a simplified command line
+ where tags are given with spaces, e.g. `cm show cache "get python"` is treated as tags `get,python`.
+
+ TBD: add input/output description
+ """
+ # Check simplified CMD: cm show cache "get python"
+ # If artifact has spaces, treat them as tags!
+ artifact = i.get('artifact', '')
+ tags = i.get('tags', '')
+
+ # Tags may be a list (if comes internally from CM scripts) or string if
+ # comes from CMD
+ if not isinstance(tags, list):
+ tags = tags.strip()
+
+ if ' ' in artifact: # or ',' in artifact:
+ del (i['artifact'])
+ if 'parsed_artifact' in i:
+ del (i['parsed_artifact'])
+
+ new_tags = artifact.replace(' ', ',')
+ tags = new_tags if tags == '' else new_tags + ',' + tags
+
+ i['tags'] = tags
+
+ # Force automation when rerunning access with processed input
+ i['automation'] = 'cache,541d6f712a6b464e'
+ i['action'] = 'search'
+ # Avoid recursion - use the internal (common) search action
+ # instead of calling this overridden function again
+ i['common'] = True
+
+ # Find CM artifact(s)
+ return self.cmind.access(i)
+
+ ############################################################
+
+ def copy_to_remote(self, i):
+ """
+ Copy cached CM entries to a remote host.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ parsed_artifact (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default)
+
+ (output_dir) (str): output directory (./ by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ return utils.call_internal_module(
+ self, __file__, 'module_misc', 'copy_to_remote', i)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py b/cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py
new file mode 100644
index 0000000000..d83d9f763a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py
@@ -0,0 +1,122 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+from cmind import utils
+
+
+############################################################
+def copy_to_remote(i):
+ """
+ Copy cached CM entries to a remote host.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ parsed_artifact (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default)
+
+ (output_dir) (str): output directory (./ by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ self_module = i['self_module']
+
+ remote_host = i.get('remote_host')
+ if not remote_host:
+ return {'return': 1,
+ 'error': 'Please provide a remote hostname/IP via --remote_host'}
+ remote_cm_repos_location = i.get(
+ 'remote_cm_repos_location', os.path.join(
+ "/home", os.getlogin(), "CM", "repos"))
+ remote_cm_cache_location = os.path.join(
+ remote_cm_repos_location, "local", "cache")
+
+ remote_port = i.get('remote_port', '22')
+ remote_user = i.get('remote_user', os.getlogin())
+
+ tag_string = i['tags']
+ tag_string += ",-tmp"
+
+ cm_input = {'action': 'show',
+ 'automation': 'cache',
+ 'tags': f'{tag_string}',
+ 'quiet': True
+ }
+ r = self_module.cmind.access(cm_input)
+ if r['return'] > 0:
+ return r
+
+ if len(r['list']) == 0:
+ pass  # FIXME: no cache entries found for the given tags
+ elif len(r['list']) > 1:
+ print("Multiple cache entries found: ")
+ for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')):
+ print(k.path)
+ x = input("Would you like to copy them all? Y/n: ")
+ if x.lower() == 'n':
+ return {'return': 0}
+
+ import json
+
+ for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')):
+ path = k.path
+ cacheid = os.path.basename(path)
+
+ copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_cm_cache_location}"
+ print(copy_cmd)
+ os.system(copy_cmd)
+
+ cm_cached_state_json_file = os.path.join(path, "cm-cached-state.json")
+ if not os.path.exists(cm_cached_state_json_file):
+ return {'return': 1,
+ 'error': f'cm-cached-state.json file missing in {path}'}
+
+ with open(cm_cached_state_json_file, "r") as f:
+ cm_cached_state = json.load(f)
+
+ new_env = cm_cached_state['new_env']
+ new_state = cm_cached_state['new_state']  # TODO: fix new state
+ cm_repos_path = os.environ.get(
+ 'CM_REPOS', os.path.join(
+ os.path.expanduser("~"), "CM", "repos"))
+ cm_cache_path = os.path.realpath(
+ os.path.join(cm_repos_path, "local", "cache"))
+
+ for key, val in new_env.items():
+     if isinstance(val, str):
+         new_env[key] = val.replace(
+             cm_cache_path, remote_cm_cache_location)
+
+ with open("tmp_remote_cached_state.json", "w") as f:
+ json.dump(cm_cached_state, f, indent=2)
+
+ remote_cached_state_file_location = os.path.join(
+ remote_cm_cache_location, cacheid, "cm-cached-state.json")
+ copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}"
+ print(copy_cmd)
+ os.system(copy_cmd)
+
+ return {'return': 0}
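+
+# Example (hypothetical values) of invoking this action from the CM CLI:
+#   cm copy_to_remote cache --tags=get,python --remote_host=192.168.1.100 \
+#      --remote_user=ubuntu --remote_port=22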
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md
new file mode 100644
index 0000000000..cc94030ab3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md
@@ -0,0 +1,8 @@
+Examples:
+
+```bash
+cm set cfg default                        # create or print the default config
+cm set cfg default --key.script.silent    # set the script.silent key
+cm set cfg default --key.script.silent-   # a trailing '-' deletes the key
+```
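+
+A sketch of a roughly equivalent Python call (assumptions: the `set` action is internally
+substituted with `xset`, and the `--key.*` CLI flags map to a nested `key` dictionary):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'set',
+                  'automation': 'cfg,88dce9c160324c5d',
+                  'artifact': 'default',
+                  'key': {'script': {'silent': True}}})
+if r['return'] > 0:
+    print(r['error'])
+```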
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/README.md b/cmx4mlops/cmx4mlops/repo/automation/cfg/README.md
new file mode 100644
index 0000000000..3c82852c8d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/README.md
@@ -0,0 +1,27 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test cfg``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15))
+ * CM CLI with UID: ```cm test cfg,88dce9c160324c5d``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'cfg,88dce9c160324c5d',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json
new file mode 100644
index 0000000000..9a1dc030e8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json
@@ -0,0 +1,12 @@
+{
+ "action_substitutions": {
+ "set":"xset"
+ },
+ "alias": "cfg",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "88dce9c160324c5d"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/module.py b/cmx4mlops/cmx4mlops/repo/automation/cfg/module.py
new file mode 100644
index 0000000000..4b08f39265
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/module.py
@@ -0,0 +1,270 @@
+# Universal cfg for CM automations
+#
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
+
+ ############################################################
+ def xset(self, i):
+ """
+ Set keys in configuration
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (artifact) (str): CM artifact with configuration
+ (tags) (str): list of tags to find CM artifact with configuration
+
+ (key) (dict): dictionary with configuration updates (keys ending with '-' are deleted)
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+
+ r = self._find_cfg_artifact(i)
+ if r['return'] > 0:
+ return r
+
+ # Path to cfg
+ path = r['path']
+ path_to_config = r['path_to_config']
+ config = r['config']
+
+ # Clean input to leave only keys for the configuration
+ new_config = i.get('key', {})
+
+ # If new config is empty, just print existing config
+ if len(new_config) > 0:
+ # Check if some keys must be deleted (keys ending with '-')
+ def check_to_delete(d):
+
+ for k in list(d.keys()):
+ v = d[k]
+ if isinstance(v, dict):
+ check_to_delete(v)
+ else:
+ if k.endswith('-'):
+ if k[:-1] in d:
+ del (d[k[:-1]])
+ del (d[k])
+ else:
+ vsl = str(v).lower()
+ if vsl == 'none':
+ v = None
+ elif vsl == 'false':
+ v = False
+ elif vsl == 'true':
+ v = True
+
+ d[k] = v
+
+ utils.merge_dicts({'dict1': config,
+ 'dict2': new_config,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ check_to_delete(config)
+
+ r = utils.save_json(path_to_config, config)
+ if r['return'] > 0:
+ return r
+
+ # Print config
+ print('Config:')
+ print('')
+ print(json.dumps(config, indent=2))
+
+ return {'return': 0}
+
+ ############################################################
+ def load(self, i):
+ """
+ Load configuration
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (artifact) (str): CM artifact with configuration
+ (tags) (str): list of tags to find CM artifact with configuration
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ return self._find_cfg_artifact(i)
+
+ ############################################################
+ def _find_cfg_artifact(self, i):
+ """
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (artifact) (str): CM artifact with configuration
+ (tags) (str): list of tags to find CM artifact with configuration
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ # Clean input to find artifact
+ ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags'])
+
+ parsed_artifact = i.get('parsed_artifact', [])
+
+ artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None
+ artifact_repo = parsed_artifact[1] if len(
+ parsed_artifact) > 1 else None
+
+ artifact = i.get('artifact', '')
+
+ if artifact == '':
+ ii['artifact'] = 'default'
+
+ tags = ii.get('tags', '')
+
+ if 'cm-universal-cfg' not in tags:
+ if tags != '':
+ tags += ','
+ tags += 'cm-universal-cfg'
+
+ ii['tags'] = tags
+
+ automation = ii['automation']
+ if automation != '.' and ',' not in automation:
+ ii['automation'] = automation + ',' + self.meta['uid']
+
+ # Find the cfg artifact (use common action)
+
+ ii['action'] = 'find'
+ ii['out'] = ''
+ # Avoid recursion - use the internal (common) find action
+ # instead of calling this function again
+ ii['common'] = True
+
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ if len(lst) == 0:
+ ii['action'] = 'add'
+ ii['meta'] = {}
+
+ # Tags must be unique for default
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ path = r['path']
+ elif len(lst) > 1:
+ return {
+ 'return': 1, 'error': 'ambiguity in cfg name - more than 1 CM artifact found'}
+ else:
+ path = lst[0].path
+
+ # Check if has config
+ path_to_cfg = os.path.join(path, 'config.json')
+
+ config = {}
+ if os.path.isfile(path_to_cfg):
+ r = utils.load_json(path_to_cfg)
+ if r['return'] > 0:
+ return r
+
+ config = r['meta']
+
+ return {'return': 0, 'path': path,
+ 'path_to_config': path_to_cfg, 'config': config}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/README.md b/cmx4mlops/cmx4mlops/repo/automation/challenge/README.md
new file mode 100644
index 0000000000..2db03e8b16
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/README.md
@@ -0,0 +1,27 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test challenge``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15))
+ * CM CLI with UID: ```cm test challenge,3d84abd768f34e08``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'challenge,3d84abd768f34e08',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json
new file mode 100644
index 0000000000..a4f4164527
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "challenge",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "3d84abd768f34e08"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/module.py b/cmx4mlops/cmx4mlops/repo/automation/challenge/module.py
new file mode 100644
index 0000000000..963ab43b6f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/module.py
@@ -0,0 +1,66 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py
index 3d17c08f7e..f63b0d44b9 100644
--- a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py
+++ b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py
@@ -81,3 +81,14 @@ def test(self, i):
return {'return':0}
+
+ ############################################################
+ def run(self, i):
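+ # Demo action: print the input dict and echo the optional 'test'/'test2' values back as 'new_key'/'new_key2'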
+
+ import json
+ print (json.dumps(i, indent=2))
+
+ v = i.get('test', 'default')
+ v2 = i.get('test2', 'default')
+
+ return {'return':0, 'new_key':v, 'new_key2':v2}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/README.md b/cmx4mlops/cmx4mlops/repo/automation/contributor/README.md
new file mode 100644
index 0000000000..df1f4e3d6f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/README.md
@@ -0,0 +1,47 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test contributor``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15))
+ * CM CLI with UID: ```cm test contributor,68eae17b590d4f8f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'contributor,68eae17b590d4f8f',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+#### add
+
+ * CM CLI: ```cm add contributor``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54))
+ * CM CLI with UID: ```cm add contributor,68eae17b590d4f8f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'add',
+ 'automation':'contributor,68eae17b590d4f8f',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json
new file mode 100644
index 0000000000..008f7d54c9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "contributor",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "68eae17b590d4f8f"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/module.py b/cmx4mlops/cmx4mlops/repo/automation/contributor/module.py
new file mode 100644
index 0000000000..a2d6954ad5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/module.py
@@ -0,0 +1,174 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
+
+ ############################################################
+ def add(self, i):
+ """
+ Add CM contributor.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ self_automation = self.meta['alias'] + ',' + self.meta['uid']
+
+ console = i.get('out') == 'con'
+
+ artifact = i.get('artifact', '')
+ if ':' not in artifact:
+ artifact = 'mlcommons@ck:' + artifact
+
+ j = artifact.find(':')
+ name = artifact[j + 1:]
+
+ # Check info
+ if name == '':
+ name = input('Enter your name: ').strip()
+ if name == '':
+ return {'return': 1, 'error': 'name can\'t be empty'}
+
+ artifact += name
+
+ # Check if doesn't exist
+ r = self.cmind.access({'action': 'find',
+ 'automation': self_automation,
+ 'artifact': artifact})
+ if r['return'] > 0:
+ return r
+ elif r['return'] == 0 and len(r['list']) > 0:
+ return {'return': 1, 'error': 'CM artifact with name {} already exists in {}'.format(
+ name, r['list'][0].path)}
+
+ meta = i.get('meta', {})
+
+ # Prepare meta
+ org = meta.get('organization', '')
+ if org == '':
+ org = input('Enter your organization (optional): ').strip()
+
+ url = input('Enter your webpage (optional): ').strip()
+
+ tags = input(
+ 'Enter tags for your challenges separated by commas (you can add them later): ').strip()
+
+ if meta.get('name', '') == '':
+ meta['name'] = name
+
+ if org != '':
+ meta['organization'] = org
+
+ if url != '':
+ meta['urls'] = [url]
+
+ if tags != '':
+ meta['ongoing'] = tags.split(',')
+
+ # Add the contributor artifact (use common action)
+ i['out'] = 'con'
+ # Avoid recursion - use the internal (common) add action
+ # to add the artifact
+ i['common'] = True
+
+ i['action'] = 'add'
+ i['automation'] = self_automation
+ i['artifact'] = artifact
+
+ i['meta'] = meta
+
+ print('')
+
+ r = self.cmind.access(i)
+ if r['return'] > 0:
+ return r
+
+ path = r['path']
+
+ path2 = os.path.dirname(path)
+
+ print('')
+ print(
+ 'Please go to {}, add your directory to Git, commit it and create a PR:'.format(path2))
+ print('')
+ print('cd {}'.format(path2))
+ print('git add "{}"'.format(name))
+ print('git commit "{}"'.format(name))
+ print('')
+ print('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!')
+ print('Looking forward to your contributions!')
+
+ return r
diff --git a/cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/data/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/data/_cm.json
new file mode 100644
index 0000000000..7dd9a139f3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/data/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "data",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "84d8ef6914bf4d78"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/data/module.py b/cmx4mlops/cmx4mlops/repo/automation/data/module.py
new file mode 100644
index 0000000000..963ab43b6f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/data/module.py
@@ -0,0 +1,66 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/README.md b/cmx4mlops/cmx4mlops/repo/automation/docker/README.md
new file mode 100644
index 0000000000..c6ef9a3842
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docker/README.md
@@ -0,0 +1,27 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test docker``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15))
+ * CM CLI with UID: ```cm test docker,2d90be7cab6e4d9f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'docker,2d90be7cab6e4d9f',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json
new file mode 100644
index 0000000000..11a5085d0e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json
@@ -0,0 +1,11 @@
+{
+ "alias": "docker",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "desc": "Managing modular docker containers (under development)",
+ "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)",
+ "tags": [
+ "automation"
+ ],
+ "uid": "2d90be7cab6e4d9f"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/module.py b/cmx4mlops/cmx4mlops/repo/automation/docker/module.py
new file mode 100644
index 0000000000..4b49bbd3cb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docker/module.py
@@ -0,0 +1,65 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ CM "docker" automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ (artifact) (str): artifact as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json
new file mode 100644
index 0000000000..6945baccaf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "docs",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "9558c9e6ca124065"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/docs/module.py b/cmx4mlops/cmx4mlops/repo/automation/docs/module.py
new file mode 100644
index 0000000000..963ab43b6f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/docs/module.py
@@ -0,0 +1,66 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md
new file mode 100644
index 0000000000..454c8d6ac2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md
@@ -0,0 +1,315 @@
+[ [Back to index](../../../docs/README.md) ]
+
+
+Table of contents:
+
+* [CM "experiment" automation](#cm-"experiment"-automation)
+ * [Introducing CM experiment automation](#introducing-cm-experiment-automation)
+ * [Installing CM with ResearchOps/DevOps/MLOps automations](#installing-cm-with-researchops/devops/mlops-automations)
+ * [Understanding CM experiments](#understanding-cm-experiments)
+ * [Exploring combinations of parameters (autotuning, design space exploration)](#exploring-combinations-of-parameters-autotuning-design-space-exploration)
+ * [Aggregating and unifying results](#aggregating-and-unifying-results)
+ * [Visualizing results](#visualizing-results)
+ * [Sharing experiments with the community](#sharing-experiments-with-the-community)
+ * [Running CM experiments with CM scripts](#running-cm-experiments-with-cm-scripts)
+ * [Further community developments](#further-community-developments)
+
+
+
+# CM "experiment" automation
+
+*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md),
+ [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md)
+ and [CM scripts](../script/README-extra.md) to understand CM motivation and concepts.
+ You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md)
+ to run some applications and benchmarks on your platform using CM scripts.*
+
+## Introducing CM experiment automation
+
+
+Researchers, engineers and students spend a considerable amount of their time experimenting with
+many different settings of applications, tools, compilers, software and hardware
+to find the optimal combination suitable for their use cases.
+
+Based on their feedback, our [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+started developing a CM automation called "experiment".
+The goal is to provide a common interface to run, record, share, visualize and reproduce experiments
+on any platform with any software, hardware and data.
+
+The community helped us test a prototype of our "experiment" automation to record results in a unified CM format
+from [several MLPerf benchmarks](https://github.com/mlcommons/cm4mlperf-results)
+including [MLPerf inference](https://github.com/mlcommons/inference) and [MLPerf Tiny](https://github.com/mlcommons/tiny),
+visualize them at the [MLCommons CM platform](https://access.cknowledge.org/playground/?action=experiments&tags=all),
+and improve them by the community via [public benchmarking, optimization and reproducibility challenges](https://access.cknowledge.org/playground/?action=challenges).
+
+
+
+## Installing CM with ResearchOps/DevOps/MLOps automations
+
+This CM automation is available in the most commonly used `mlcommons@cm4mlops` repository.
+
+First, install CM automation language as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+Then, install or update this repository as follows:
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+You can now test that CM experiment automation is available as follows:
+```bash
+cm run experiment --help
+```
+or using the `cme` shortcut in CM v1.4.1+
+```bash
+cme --help
+```
+
+
+
+## Understanding CM experiments
+
+CM experiment simply wraps any user command line, creates an associated CM `experiment` artifact with a random ID (16 lowercase hex characters)
+and some user tags in `_cm.json`, creates an extra `{date}{time}` subdirectory with a `cm-input.json` file capturing the CM input,
+and executes the user command line inside an extra subdirectory with another random ID, as shown below.
+
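+A schematic layout of the resulting artifact (using the illustrative IDs from the run below):
+
+```
+local/experiment/b83a1fb24dbf4945/      # CM experiment artifact; tags stored in _cm.json
+  2023-06-09.09-58-02.863466/           # {date}{time} subdirectory
+    cm-input.json                       # recorded CM input
+    7ed0ea0edd6b4dd7/                   # experiment step where the user command runs
+```
+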
+The following command will print "Hello World!" while recording all the provenance in CM format in the local CM repository:
+
+```bash
+cme --tags=my,experiment,hello-world -- echo "Hello World!"
+```
+or
+```bash
+cm run experiment --tags=my,experiment,hello-world -- echo "Hello World!"
+```
+
+You should see output similar to the following:
+```bash
+
+Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945
+Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466
+================================================================
+Experiment step: 1 out of 1
+
+Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466\7ed0ea0edd6b4dd7
+
+"Hello World!"
+```
+
+You can find and explore the newly created CM artifact as follows:
+```bash
+cm find experiment --tags=my,experiment,hello-world
+```
+or using UID
+```bash
+cm find experiment b83a1fb24dbf4945
+```
+
+When running the same experiment again, CM will find the existing artifact by tags and create a new `{date}{time}` directory there:
+```bash
+cme --tags=my,experiment,hello-world -- echo "Hello World!"
+
+Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945
+Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210
+================================================================
+Experiment step: 1 out of 1
+
+Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210\7ed0ea0edd6b4dd7
+
+"Hello World!"
+```
+
+You can now replay this experiment as follows:
+```bash
+cm replay experiment --tags=my,experiment,hello-world
+```
+
+Note that you can obtain the current directory where you called CM
+(rather than the CM experiment artifact directory) via the {{CD}} variable as follows:
+```bash
+cme --tags=my,experiment,hello-world -- echo {{CD}}
+```
+
+You can also record experiments in another CM repository instead of the `local` one as follows:
+```bash
+cm list repo
+cme {CM repository from above list}: --tags=my,experiment,hello-world -- echo {{CD}}
+```
+
+Finally, you can force a specific artifact name instead of some random ID as follows:
+```bash
+cme {my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}}
+```
+or with a given repository
+```bash
+cme {CM repository from above list}:{my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}}
+```
+
+## Exploring combinations of parameters (autotuning, design space exploration)
+
+One of the most common tasks in computer engineering (and other sciences)
+is to explore various combinations of parameters of some applications
+and systems to select the optimal ones to trade off performance, accuracy,
+power consumption, memory usage and other characteristics.
+
+As a starting point, we have implemented a very simple explorer as a Cartesian product
+of any number of specified variables that are passed to the user command line via double curly braces `{{VAR}}`, similar to GitHub.
+
+You just need to create a simple JSON file `cm-input.json` to describe sets/ranges for each variable as follows:
+```json
+{
+ "explore": {
+ "VAR1": [
+ 1,
+ 2,
+ 3
+ ],
+ "VAR2": [
+ "a",
+ "b"
+ ],
+ "VAR3": "[2**i for i in range(0,6)]"
+ }
+}
+```
+
+or YAML `cm-input.yaml`:
+
+```yaml
+explore:
+ VAR1: [1,2,3]
+ VAR2: ["a","b"]
+ VAR3: "[2**i for i in range(0,6)]"
+```
+
+You can then run the following example to see all iterations:
+```bash
+cm run experiment --tags=my,experiment,hello-world @test_input.yaml \
+ -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%%
+```
+
+Note that you can also define a Python list or range for other variables
+directly on the command line, as demonstrated in the above example for `VAR4` - `{{VAR4{['xx','yy','zz']}}}`.
+
+CM will create or reuse the experiment artifact with tags `my,experiment,hello-world`
+and will then iterate over the Cartesian product of all detected variables.
+
+For each iteration, CM will create a `{date}{time}` subdirectory in the given experiment artifact
+and will then run the user command line with substituted variables there.
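+
+To make the expansion concrete, here is a minimal Python sketch (illustrative only, not the actual CM implementation) of how the `explore` dictionary above turns into iterations:
+
+```python
+# Minimal sketch of the Cartesian-product explorer (assumption: string values
+# are Python expressions that evaluate to a list of values)
+import itertools
+
+explore = {
+    "VAR1": [1, 2, 3],
+    "VAR2": ["a", "b"],
+    "VAR3": "[2**i for i in range(0,6)]",
+}
+
+expanded = {k: (eval(v) if isinstance(v, str) else v)
+            for k, v in explore.items()}
+
+for step, combo in enumerate(itertools.product(*expanded.values()), 1):
+    values = dict(zip(expanded, combo))
+    # CM would substitute the {{VAR}} placeholders in the user command line here
+    print(f"Experiment step {step}: {values}")
+```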
+
+You can then replay any of the exploration experiments as follows:
+```bash
+cm replay experiment --tags={tags} --dir={sub directory}
+```
+
+
+
+## Aggregating and unifying results
+
+Users can expose any information such as measured characteristics of their applications and/or systems (performance,
+hardware or OS state, accuracy, internal parameters, etc.) to CM for further analysis and visualization
+by generating a JSON `cm-result.json` file with any dictionary.
+
+If this file exists after executing the user command, CM will load it after each experiment or exploration step,
+and merge it with a list in a common `cm-result.json` in the `{date}{time}` directory for this experiment.
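+
+For example, a user script could expose metrics like this (a sketch; the keys are hypothetical):
+
+```python
+# Write measured characteristics to cm-result.json so that CM can aggregate them
+import json
+
+result = {"latency_ms": 12.3, "accuracy": 0.91}  # hypothetical metrics
+
+with open("cm-result.json", "w") as f:
+    json.dump(result, f, indent=2)
+```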
+
+
+
+## Visualizing results
+
+Users can now visualize multiple experiments using the CM GUI script as follows:
+```bash
+cm run script "gui _graph" --exp_tags=my,experiment,hello-world
+```
+
+This script will search for all CM experiment entries with these tags, read all `cm-result.json` files,
+detect all keys used in the result dictionaries, and let users select these keys for the X and Y axes
+to prepare a 2D graph using the popular [Streamlit library](https://streamlit.io), add derived metrics and set constraints,
+as shown in the following example for one of the official [Tiny MLPerf submissions](https://github.com/mlcommons/tiny):
+
+
+## Sharing experiments with the community
+
+It is possible to share experiments with a common automation interface
+from your own GitHub/GitLab repository, container or zip/tar file
+in a non-intrusive way.
+
+You need to go to the root directory of your project and initialize a CM repository there
+with a unique name "my-cool-project" as follows:
+
+```bash
+cm init repo my-cool-project --path=. --prefix=cmr
+```
+
+This command will create a `cmr.yaml` file with a description and unique ID of this repository,
+and will register it with CM. Note that all CM automations and artifacts will be located
+in the `cmr` sub-directory to avoid contaminating your project. They can be deleted
+or moved to another project at any time.
+
+You can now record new experiments in this repository by adding `my-cool-project:` to the CM experiment command line as follows:
+```bash
+cm run experiment my-cool-project: --tags=my,experiment,hello-world -- echo "Hello World!"
+```
+
+You can also move a set of existing experiments from the `local` CM repository to the new one as follows:
+```bash
+cm move experiment my-cool-project: --tags=my,experiment,hello-world
+```
+
+You can continue replaying these experiments in the same way, no matter which CM repository they are in:
+```bash
+cm replay experiment --tags=my,experiment,hello-world
+```
+
+or you can enforce a specific repository as follows:
+```bash
+cm replay experiment my-cool-project: --tags=my,experiment,hello-world
+```
+
+
+
+
+
+## Running CM experiments with CM scripts
+
+User scripts and tools may contain some hardwired local paths that may prevent replaying them on another platform.
+In that case, we suggest using [CM scripts](../script/README-extra.md).
+
+CM scripts solve this problem by wrapping existing user scripts and tools and detecting/resolving paths
+to specific tools and artifacts on a given user platform.
+
+You can find an example of using CM scripts with CM experiments in [this directory](tests) - see `test3.bat` or `test3.sh`:
+```bash
+cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
+```
+
+You can use the following environment variables to pass the current path,
+the paths to experiment entries and the experiment step number to your CM script:
+* {{CD}}
+* {{CM_EXPERIMENT_STEP}}
+* {{CM_EXPERIMENT_PATH}}
+* {{CM_EXPERIMENT_PATH2}}
+* {{CM_EXPERIMENT_PATH3}}
+
+
+Feel free to check [this tutorial](../../../docs/tutorials/common-interface-to-reproduce-research-projects.md)
+to add CM scripts for your own applications, tools and native scripts.
+
+We are currently extending CM experiments and CM scripts for MLPerf benchmarks
+to automate benchmarking, optimization and design space exploration of ML/AI systems
+on any software and hardware - please stay tuned via our [Discord server](https://discord.gg/JjWNWXKxwT).
+
+
+
+## Further community developments
+
+We are developing this experiment automation in CM to help the community share, reproduce and reuse experiments
+using a common, simple, human-readable, and portable [automation language](../../../docs/README.md).
+
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) of the [MLCommons task force on automation and reproducibility](../taskforce.md)
+to participate in the unification and extension of this interface and CM scripts for diverse research projects and tools.
+
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/README.md b/cmx4mlops/cmx4mlops/repo/automation/experiment/README.md
new file mode 100644
index 0000000000..13ea6ec1a5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/README.md
@@ -0,0 +1,87 @@
+*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22))
+ * CM CLI with UID: ```cm test experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'experiment,a0a2d123ef064bcb',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+#### run
+
+ * CM CLI: ```cm run experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64))
+ * CM CLI with UID: ```cm run experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'run',
+ 'automation':'experiment,a0a2d123ef064bcb',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+#### rerun
+
+ * CM CLI: ```cm rerun experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428))
+ * CM CLI with UID: ```cm rerun experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'rerun',
+ 'automation':'experiment,a0a2d123ef064bcb',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)
+ ```python
+ })
+ if r['return']>0:
+     print(r['error'])
+ ```
+
+#### replay
+
+ * CM CLI: ```cm replay experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451))
+ * CM CLI with UID: ```cm replay experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'replay',
+ 'automation':'experiment,a0a2d123ef064bcb',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
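+For reference, a complete and runnable version of the Python API fragments above looks like this (a sketch):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+ 'automation': 'experiment,a0a2d123ef064bcb',
+ 'out': 'con'})
+if r['return'] > 0:
+ print(r['error'])
+```
+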
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json
new file mode 100644
index 0000000000..49bb0e6166
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json
@@ -0,0 +1,11 @@
+{
+ "alias": "experiment",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "desc": "Managing and reproducing experiments (under development)",
+ "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)",
+ "tags": [
+ "automation"
+ ],
+ "uid": "a0a2d123ef064bcb"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/module.py b/cmx4mlops/cmx4mlops/repo/automation/experiment/module.py
new file mode 100644
index 0000000000..c83e7c0499
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/module.py
@@ -0,0 +1,855 @@
+# Universal experiment automation to support universal benchmarking
+# and optimization of apps and systems
+#
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
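+# Example CLI usage (a sketch based on tests/test2.sh in this automation):
+#
+# cm run experiment --tags=test -- echo {{VAR1{[1,2,3]}}}
+#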
+
+import os
+import itertools
+import copy
+import json
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ CM "experiment" automation actions
+ """
+
+ CM_RESULT_FILE = 'cm-result.json'
+ CM_INPUT_FILE = 'cm-input.json'
+ CM_OUTPUT_FILE = 'cm-output.json'
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
+
+ ############################################################
+
+ def run(self, i):
+ """
+ Run experiment
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (artifact) (str): experiment artifact name (can include repository separated by :)
+ (tags) (str): experiment tags separated by comma
+
+ (dir) (str): force recording into a specific directory
+
+
+ (script) (str): find and run CM script by name
+ (s)
+
+ (script_tags) (str): find and run CM script by tags
+ (stags)
+
+ (rerun) (bool): if True, rerun experiment in a given entry/directory instead of creating a new one...
+
+ (explore) (dict): exploration dictionary
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+ """
+
+ # Copy of original input
+ ii_copy = copy.deepcopy(i)
+ cur_dir = os.getcwd()
+
+ # Find or add artifact based on repo/alias/tags
+ r = self._find_or_add_artifact(i)
+ if r['return'] > 0:
+ return r
+
+ experiment = r['experiment']
+
+ console = i.get('out', '') == 'con'
+
+ # Print experiment folder
+ experiment_path = experiment.path
+
+ if console:
+ print('')
+ print('Path to CM experiment artifact: {}'.format(experiment_path))
+
+ # Get directory with datetime
+ datetime = i.get('dir', '')
+
+ if datetime == '' and i.get('rerun', False):
+ # Check if some experiment directories already exist
+
+ directories = os.listdir(experiment_path)
+
+ datetimes = sorted([f for f in directories if os.path.isfile(
+ os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True)
+
+ if len(datetimes) == 1:
+ datetime = datetimes[0]
+ elif len(datetimes) > 1:
+ print('')
+ print('Select experiment:')
+
+ datetimes = sorted(datetimes)
+
+ num = 0
+ print('')
+ for d in datetimes:
+ print('{}) {}'.format(num, d.replace('.', ' ')))
+ num += 1
+
+ if not console:
+ return {
+ 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'}
+
+ print('')
+ x = input('Make your selection or press Enter for 0: ')
+
+ x = x.strip()
+ if x == '':
+ x = '0'
+
+ selection = int(x)
+
+ if selection < 0 or selection >= num:
+ selection = 0
+
+ datetime = datetimes[selection]
+
+ if datetime != '':
+ experiment_path2 = os.path.join(experiment_path, datetime)
+ else:
+ num = 0
+ found = False
+
+ while not found:
+ r = utils.get_current_date_time({})
+ if r['return'] > 0:
+ return r
+
+ datetime = r['iso_datetime'].replace(
+ ':', '-').replace('T', '.')
+
+ if num > 0:
+ datetime += '.' + str(num)
+
+ experiment_path2 = os.path.join(experiment_path, datetime)
+
+ if not os.path.isdir(experiment_path2):
+ found = True
+ break
+
+ num += 1
+
+ # Check/create directory with date_time
+ if not os.path.isdir(experiment_path2):
+ os.makedirs(experiment_path2)
+
+ # Change current path
+ print('Path to experiment: {}'.format(experiment_path2))
+
+ os.chdir(experiment_path2)
+
+ # Record experiment input with possible exploration
+ experiment_input_file = os.path.join(
+ experiment_path2, self.CM_INPUT_FILE)
+ experiment_result_file = os.path.join(
+ experiment_path2, self.CM_RESULT_FILE)
+
+ # Clean original input
+ for k in ['parsed_artifact', 'parsed_automation', 'cmd']:
+ if k in ii_copy:
+ del (ii_copy[k])
+
+ r = utils.save_json(file_name=experiment_input_file, meta=ii_copy)
+ if r['return'] > 0:
+ return r
+
+ # Prepare run command
+ cmd = ''
+
+ unparsed = i.get('unparsed_cmd', [])
+ if len(unparsed) > 0:
+ for u in unparsed:
+ if ' ' in u:
+ u = '"' + u + '"'
+ cmd += ' ' + u
+
+ cmd = cmd.strip()
+
+ # Prepare script run
+ env = i.get('env', {})
+
+ ii = {'action': 'native-run',
+ 'automation': 'script,5b4e0237da074764',
+ 'env': env}
+
+ # Prepare exploration
+ # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml
+ # Example: {{VAR1}} {{VAR4{['xx','yy','zz']}}} (see tests/test2.sh)
+
+ # Extract exploration expressions from {{VAR{expression}}}
+ explore = i.get('explore', {})
+
+ j = 1
+ k = 0
+ while j >= 0:
+ j = cmd.find('}}}', k)
+ if j >= 0:
+ k = j + 1
+
+ l = cmd.rfind('{{', 0, j)
+
+ if l >= 0:
+ l2 = cmd.find('{', l + 2, j)
+ if l2 >= 0:
+ k = l2 + 1
+
+ var = cmd[l + 2:l2]
+ expr = cmd[l2 + 1:j]
+
+ explore[var] = expr
+
+ cmd = cmd[:l2] + cmd[j + 1:]
+
+ # Separate Design Space Exploration into var and range
+ explore_keys = []
+ explore_dimensions = []
+
+ for k in explore:
+ v = explore[k]
+
+ explore_keys.append(k)
+
+ # If the value is a string expression (e.g. "[2**i for i in range(0,6)]"),
+ # evaluate it into a list of values to explore
+ if not isinstance(v, list):
+ v = eval(v)
+
+ explore_dimensions.append(v)
+
+ # Count the number of steps first (this consumes the iterator),
+ # then recreate it for the actual exploration loop
+ step = 0
+
+ steps = itertools.product(*explore_dimensions)
+
+ num_steps = len(list(steps))
+
+ steps = itertools.product(*explore_dimensions)
+
+ ii_copy = copy.deepcopy(ii)
+
+ for dimensions in steps:
+
+ step += 1
+
+ print('================================================================')
+ print('Experiment step: {} out of {}'.format(step, num_steps))
+
+ print('')
+
+ ii = copy.deepcopy(ii_copy)
+
+ env = ii.get('env', {})
+
+ l_dimensions = len(dimensions)
+ if l_dimensions > 0:
+ print(' Updating ENV variables during exploration:')
+
+ print('')
+ for j in range(l_dimensions):
+ v = dimensions[j]
+ k = explore_keys[j]
+ print(' - Dimension {}: "{}" = {}'.format(j, k, v))
+
+ env[k] = str(v)
+
+ print('')
+
+ # Generate UID and prepare extra directory:
+ r = utils.gen_uid()
+ if r['return'] > 0:
+ return r
+
+ uid = r['uid']
+
+ experiment_path3 = os.path.join(experiment_path2, uid)
+ if not os.path.isdir(experiment_path3):
+ os.makedirs(experiment_path3)
+
+ # Get date time of experiment
+ r = utils.get_current_date_time({})
+ if r['return'] > 0:
+ return r
+
+ current_datetime = r['iso_datetime']
+
+ # Change current path
+ print('Path to experiment step: {}'.format(experiment_path3))
+ print('')
+ os.chdir(experiment_path3)
+
+ # Prepare and run experiment in a given placeholder directory
+ os.chdir(experiment_path3)
+
+ ii['env'] = env
+
+ # Change only in CMD
+ env_local = {'CD': cur_dir,
+ 'CM_EXPERIMENT_STEP': str(step),
+ 'CM_EXPERIMENT_PATH': experiment_path,
+ 'CM_EXPERIMENT_PATH2': experiment_path2,
+ 'CM_EXPERIMENT_PATH3': experiment_path3}
+
+ # Update {{}} in CMD
+ cmd_step = cmd
+
+ j = 1
+ k = 0
+ while j >= 0:
+ j = cmd_step.find('{{', k)
+ if j >= 0:
+ k = j
+ l = cmd_step.find('}}', j + 2)
+ if l >= 0:
+ var = cmd_step[j + 2:l]
+
+ # Such vars must be in env
+ if var not in env and var not in env_local:
+ return {
+ 'return': 1, 'error': 'key "{}" is not in env during exploration'.format(var)}
+
+ if var in env:
+ value = env[var]
+ else:
+ value = env_local[var]
+
+ cmd_step = cmd_step[:j] + str(value) + cmd_step[l + 2:]
+
+ ii['command'] = cmd_step
+
+ print('Generated CMD:')
+ print('')
+ print(cmd_step)
+ print('')
+
+ # Prepare experiment step input
+ experiment_step_input_file = os.path.join(
+ experiment_path3, self.CM_INPUT_FILE)
+
+ r = utils.save_json(file_name=experiment_step_input_file, meta=ii)
+ if r['return'] > 0:
+ return r
+
+ experiment_step_output_file = os.path.join(
+ experiment_path3, self.CM_OUTPUT_FILE)
+ if os.path.isfile(experiment_step_output_file):
+ os.remove(experiment_step_output_file)
+
+ # Run CMD
+ rr = self.cmind.access(ii)
+ if rr['return'] > 0:
+ return rr
+
+ # Record output
+ result = {}
+
+ if os.path.isfile(experiment_step_output_file):
+ r = utils.load_json(file_name=experiment_step_output_file)
+ if r['return'] > 0:
+ return r
+
+ result = r['meta']
+
+ # Try to flatten
+ try:
+ flatten_result = flatten_dict(result)
+ result = flatten_result
+ except BaseException:
+ pass
+
+ # Add extra info
+ result['uid'] = uid
+ result['iso_datetime'] = current_datetime
+
+ # Attempt to append to the main file ...
+ all_results = []
+
+ if os.path.isfile(experiment_result_file):
+ r = utils.load_json(file_name=experiment_result_file)
+ if r['return'] > 0:
+ return r
+
+ all_results = r['meta']
+
+ all_results.append(result)
+
+ r = utils.save_json(
+ file_name=experiment_result_file,
+ meta=all_results)
+ if r['return'] > 0:
+ return r
+
+ rr = {'return': 0,
+ 'experiment_path': experiment_path,
+ 'experiment_path2': experiment_path2}
+
+ return rr
+
+ ############################################################
+
+ def rerun(self, i):
+ """
+ Rerun experiment
+
+ cm run experiment --rerun=True ...
+ """
+
+ i['rerun'] = True
+
+ return self.run(i)
+
+ ############################################################
+
+ def replay(self, i):
+ """
+ Replay experiment
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (artifact) (str): experiment artifact
+
+ (tags) (str): experiment tags separated by comma
+
+ (dir) (str): experiment directory (often date time)
+ (uid) (str): unique ID of an experiment
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+ """
+
+ # Find or add artifact based on repo/alias/tags
+ i['fail_if_not_found'] = True
+ r = self._find_or_add_artifact(i)
+ if r['return'] > 0:
+ return r
+
+ experiment = r['experiment']
+
+ console = i.get('out', '') == 'con'
+
+ # Print experiment folder
+ experiment_path = experiment.path
+
+ if console:
+ print('')
+ print('Path to CM experiment artifact: {}'.format(experiment_path))
+
+ # Check date and time folder
+ uid = i.get('uid', '')
+ datetime = i.get('dir', '')
+
+ if datetime != '':
+ datetimes = [datetime]
+ else:
+ directories = os.listdir(experiment_path)
+
+ datetimes = sorted([f for f in directories if os.path.isfile(
+ os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True)
+
+ if len(datetimes) == 0:
+ return {'return': 1, 'error': 'experiment(s) not found in {}'.format(
+ experiment_path)}
+
+ # Check datetime directory
+ found_result = {}
+
+ if uid != '':
+ for d in datetimes:
+ r = self._find_uid({'path': experiment_path, 'datetime': d, 'uid': uid})
+ if r['return'] > 0:
+ return r
+
+ if len(r.get('result', {})) > 0:
+ found_result = r['result']
+ datetime = d
+ experiment_path2 = os.path.join(experiment_path, datetime)
+ break
+
+ if len(found_result) == 0:
+ return {'return': 1, 'error': 'couldn\'t find result with UID {} in {}'.format(
+ uid, experiment_path)}
+
+ else:
+ if len(datetimes) == 1:
+ datetime = datetimes[0]
+ else:
+ print('')
+ print('Available experiments:')
+
+ datetimes = sorted(datetimes)
+
+ num = 0
+ print('')
+ for d in datetimes:
+ print('{}) {}'.format(num, d.replace('.', ' ')))
+ num += 1
+
+ if not console:
+ return {
+ 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'}
+
+ print('')
+ x = input('Make your selection or press Enter for 0: ')
+
+ x = x.strip()
+ if x == '':
+ x = '0'
+
+ selection = int(x)
+
+ if selection < 0 or selection >= num:
+ selection = 0
+
+ datetime = datetimes[selection]
+
+ # Final path to experiment
+ experiment_path2 = os.path.join(experiment_path, datetime)
+
+ if not os.path.isdir(experiment_path2):
+ return {'return': 1, 'error': 'experiment path not found {}'.format(
+ experiment_path2)}
+
+ r = self._find_uid({'path': experiment_path, 'datetime': datetime})
+ if r['return'] > 0:
+ return r
+
+ results = r['meta']
+
+ if len(results) == 0:
+ return {'return': 1, 'error': 'results not found in {}'.format(
+ experiment_path2)}
+
+ elif len(results) == 1:
+ selection = 0
+
+ else:
+ print('')
+ print('Available Unique IDs of results:')
+
+ results = sorted(results, key=lambda x: x.get('uid', ''))
+
+ num = 0
+ print('')
+ for r in results:
+ print('{}) {}'.format(num, r.get('uid', '')))
+ num += 1
+
+ if not console:
+ return {
+ 'return': 1, 'error': 'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'}
+
+ print('')
+ x = input('Make your selection or press Enter for 0: ')
+
+ x = x.strip()
+ if x == '':
+ x = '0'
+
+ selection = int(x)
+
+ if selection < 0 or selection >= num:
+ selection = 0
+
+ found_result = results[selection]
+ uid = found_result['uid']
+
+ # Final info
+ if console:
+ print('')
+ print('Path to experiment: {}'.format(experiment_path2))
+
+ print('')
+ print('Result UID: {}'.format(uid))
+
+ # Attempt to load cm-input.json
+ experiment_input_file = os.path.join(
+ experiment_path2, self.CM_INPUT_FILE)
+
+ if not os.path.isfile(experiment_input_file):
+ return {
+ 'return': 1, 'error': '{} not found - can\'t replay'.format(self.CM_INPUT_FILE)}
+
+ r = utils.load_json(experiment_input_file)
+ if r['return'] > 0:
+ return r
+
+ cm_input = r['meta']
+
+ tags = cm_input.get('tags', '').strip()
+ if 'replay' not in tags:
+ if tags != '':
+ tags += ','
+ tags += 'replay'
+ cm_input['tags'] = tags
+
+ if console:
+ print('')
+ print('Experiment input:')
+ print('')
+ print(json.dumps(cm_input, indent=2))
+ print('')
+
+ # Run experiment again
+ r = self.cmind.access(cm_input)
+ if r['return'] > 0:
+ return r
+
+ # TBA - validate experiment, etc ...
+
+ return {'return': 0}
+
+ ############################################################
+
+ def _find_or_add_artifact(self, i):
+ """
+ Find or add experiment artifact (reused in run and replay)
+
+ Args:
+ (CM input dict):
+
+ (fail_if_not_found) (bool) - if True, fail if experiment is not found
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ experiment (CM artifact class): Experiment artifact
+
+ """
+
+ console = i.get('out', '') == 'con'
+
+ # Try to find experiment artifact by alias and/or tags
+ ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags'])
+ ii['action'] = 'find'
+
+ ii_copy = copy.deepcopy(ii)
+
+ # If artifact is specified, remove tags
+ artifact = ii.get('artifact', '').strip()
+ if artifact != '' and not artifact.endswith(':') \
+ and '*' not in artifact and '?' not in artifact:
+ if 'tags' in ii:
+ del (ii['tags'])
+
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ if len(lst) > 1:
+ print('More than 1 experiment artifact found:')
+
+ lst = sorted(lst, key=lambda x: x.path)
+
+ num = 0
+ print('')
+ for e in lst:
+ print('{}) {}'.format(num, e.path))
+ print(
+ ' Tags: {}'.format(
+ ','.join(
+ e.meta.get(
+ 'tags',
+ []))))
+ num += 1
+
+ if not console:
+ return {'return': 1, 'error': 'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'}
+
+ print('')
+ x = input('Make your selection or press Enter for 0: ')
+
+ x = x.strip()
+ if x == '':
+ x = '0'
+
+ selection = int(x)
+
+ if selection < 0 or selection >= num:
+ selection = 0
+
+ experiment = lst[selection]
+
+ elif len(lst) == 1:
+ experiment = lst[0]
+ else:
+ # Create new entry
+ if i.get('fail_if_not_found', False):
+ return {'return': 1, 'error': 'experiment not found'}
+
+ ii = copy.deepcopy(ii_copy)
+ ii['action'] = 'add'
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ experiment_uid = r['meta']['uid']
+
+ r = self.cmind.access({'action': 'find',
+ 'automation': 'experiment,a0a2d123ef064bcb',
+ 'artifact': experiment_uid})
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+ if len(lst) == 0 or len(lst) > 1:
+ return {
+ 'return': 1, 'error': 'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)}
+
+ experiment = lst[0]
+
+ return {'return': 0, 'experiment': experiment}
+
+ ############################################################
+ def _find_uid(self, i):
+ """
+ Find experiment result with a given UID
+
+ Args:
+ (CM input dict):
+
+ path (str): path to experiment artifact
+ datetime (str): sub-path to experiment
+ (uid) (str): experiment UID
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ path_to_file (str): path to experiment result file
+ meta (dict): complete list of all results
+ result (dict): result dictionary with a given UID
+
+ """
+
+ path = i['path']
+ datetime = i['datetime']
+ uid = i.get('uid', '').strip()
+
+ path_to_experiment_result_file = os.path.join(
+ path, datetime, self.CM_RESULT_FILE)
+
+ rr = {'return': 0, 'path_to_file': path_to_experiment_result_file}
+
+ if os.path.isfile(path_to_experiment_result_file):
+ r = utils.load_json(file_name=path_to_experiment_result_file)
+ if r['return'] > 0:
+ return r
+
+ meta = r['meta']
+
+ rr['meta'] = meta
+
+ # Searching for UID
+ if uid != '':
+ for result in meta:
+ ruid = result.get('uid', '').strip()
+ if ruid != '' and ruid == uid:
+ rr['result'] = result
+ break
+
+ return rr
+
+############################################################################
+
+
+def flatten_dict(d, flat_dict=None, prefix=''):
+
+ # Avoid a shared mutable default argument across calls
+ if flat_dict is None:
+ flat_dict = {}
+
+ for k in d:
+ v = d[k]
+
+ if isinstance(v, dict):
+ flatten_dict(v, flat_dict, prefix + k + '.')
+ else:
+ flat_dict[prefix + k] = v
+
+ return flat_dict
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat
new file mode 100644
index 0000000000..5ecb3a0d8d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.yaml -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh
new file mode 100644
index 0000000000..40d60a25a3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.yaml -- echo "\${VAR1} --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-\${VAR3}"
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat
new file mode 100644
index 0000000000..800e36076d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat
@@ -0,0 +1 @@
+cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh
new file mode 100644
index 0000000000..148e564337
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh
@@ -0,0 +1 @@
+cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml
new file mode 100644
index 0000000000..1c789f52a5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml
@@ -0,0 +1,4 @@
+explore:
+ VAR1: [1,2,3]
+ VAR2: ["a","b"]
+ CM_ENV_TEST3: "[2**i for i in range(0,6)]"
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat
new file mode 100644
index 0000000000..16eb9184b8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.json -- {{CD}}\test_run.bat
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh
new file mode 100644
index 0000000000..a46cb98f5a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.json -- {{CD}}/test_run.sh
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat
new file mode 100644
index 0000000000..e583f209bf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.yaml -- {{CD}}\test_run.bat
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh
new file mode 100644
index 0000000000..60c2f7a80c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.yaml -- {{CD}}/test_run.sh
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json
new file mode 100644
index 0000000000..f682f5a344
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json
@@ -0,0 +1,14 @@
+{
+ "explore": {
+ "VAR1": [
+ 1,
+ 2,
+ 3
+ ],
+ "VAR2": [
+ "a",
+ "b"
+ ],
+ "VAR3": "[2**i for i in range(0,6)]"
+ }
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml
new file mode 100644
index 0000000000..a621c5ef95
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml
@@ -0,0 +1,4 @@
+explore:
+ VAR1: [1,2,3]
+ VAR2: ["a","b"]
+ VAR3: "[2**i for i in range(0,6)]"
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat
new file mode 100644
index 0000000000..b3aa91028e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat
@@ -0,0 +1,3 @@
+echo %VAR1% --batch_size=%VAR3% %VAR2%
+
+echo {"x":%VAR1%, "y":"%VAR2%", "z":%VAR3%} > cm-output.json
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh
new file mode 100644
index 0000000000..7ed1b472ed
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh
@@ -0,0 +1 @@
+echo $VAR1 --batch_size=$VAR3 $VAR2
diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/README.md b/cmx4mlops/cmx4mlops/repo/automation/project/README.md
new file mode 100644
index 0000000000..e684ac7ade
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/project/README.md
@@ -0,0 +1,27 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test project``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15))
+ * CM CLI with UID: ```cm test project,6882553224164c56``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'project,6882553224164c56',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/project/_cm.json
new file mode 100644
index 0000000000..68042c4319
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/project/_cm.json
@@ -0,0 +1,10 @@
+{
+ "alias": "project",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)",
+ "tags": [
+ "automation"
+ ],
+ "uid": "6882553224164c56"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/module.py b/cmx4mlops/cmx4mlops/repo/automation/project/module.py
new file mode 100644
index 0000000000..963ab43b6f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/project/module.py
@@ -0,0 +1,66 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/README.md b/cmx4mlops/cmx4mlops/repo/automation/report/README.md
new file mode 100644
index 0000000000..6f2f966963
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/report/README.md
@@ -0,0 +1,27 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test report``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15))
+ * CM CLI with UID: ```cm test report,6462ecdba2054467``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'report,6462ecdba2054467',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/report/_cm.json
new file mode 100644
index 0000000000..8808957575
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/report/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "report",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "6462ecdba2054467"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/module.py b/cmx4mlops/cmx4mlops/repo/automation/report/module.py
new file mode 100644
index 0000000000..963ab43b6f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/report/module.py
@@ -0,0 +1,66 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md
new file mode 100644
index 0000000000..d63c5dc161
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md
@@ -0,0 +1,1034 @@
+[ [Back to index](../../../docs/README.md) ]
+
+# CM "script" automation
+
+
+Click here to see the table of contents.
+
+ * [Motivation](#motivation)
+ * [Obtaining shared CM scripts](#obtaining-shared-cm-scripts)
+ * [Getting started with CM scripts](#getting-started-with-cm-scripts)
+ * [Understanding CM scripts](#understanding-cm-scripts)
+ * [Wrapping native scripts](#wrapping-native-scripts)
+ * [Modifying environment variables](#modifying-environment-variables)
+ * [Understanding unified output dictionary](#understanding-unified-output-dictionary)
+ * [Modifying state dictionary](#modifying-state-dictionary)
+ * [Running CM scripts via CM Python API](#running-cm-scripts-via-cm-python-api)
+ * [Assembling pipelines (workflows) of CM scripts](#assembling-pipelines-workflows-of-cm-scripts)
+ * [Customizing CM script execution flow](#customizing-cm-script-execution-flow)
+ * [Caching output of CM scripts](#caching-output-of-cm-scripts)
+ * [Assembling pipeline to compile and run image corner detection](#assembling-pipeline-to-compile-and-run-image-corner-detection)
+ * [Customizing sub-dependencies in a pipeline](#customizing-sub-dependencies-in-a-pipeline)
+ * [Using Python virtual environments](#using-python-virtual-environments)
+ * [Assembling pipelines with other artifacts included](#assembling-pipelines-with-other-artifacts-included)
+ * [Unifying host OS and CPU detection](#unifying-host-os-and-cpu-detection)
+ * [Detecting, installing and caching system dependencies](#detecting-installing-and-caching-system-dependencies)
+ * [Using variations](#using-variations)
+ * [Running CM scripts inside containers](#running-cm-scripts-inside-containers)
+ * [Getting help about other script automation flags](#getting-help-about-other-script-automation-flags)
+ * [Further reading](#further-reading)
+
+
+
+*We suggest you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md)
+ and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts.
+ You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md)
+ to run some applications and benchmarks on your platform using CM scripts.*
+
+## Motivation
+
+While helping the community reproduce [150+ research papers](https://learning.acm.org/techtalks/reproducibility),
+we have noticed that researchers always create their own ad-hoc scripts, environment variables and files
+to perform *exactly the same steps (actions) across all papers* to prepare, run and reproduce their experiments
+across different software, hardware, models and data.
+
+
+
+This experience motivated us to create a CM automation called "script" to wrap native scripts
+from research and industrial projects with a common, simple and unified CM Command Line Interface and Python API.
+
+Such non-intrusive wrapping helps to make numerous native scripts and tools more reusable, interoperable, portable, findable
+and deterministic across different projects with different artifacts based on [FAIR principles](https://www.go-fair.org/fair-principles).
+
+CM scripts can be embedded into existing projects with minimal or no modifications at all, and they can be connected
+into powerful and portable pipelines and workflows using simple JSON or YAML files
+to prepare, run and reproduce experiments across continuously changing technology.
+
+Importantly, CM scripts can be executed in the same way in a native user environment,
+Python virtual environments (to avoid messing up native environment) and containers
+while automatically adapting to a given environment!
+
+
+
+
+
+
+
+## Obtaining shared CM scripts
+
+In order to reuse some CM scripts embedded into shared projects,
+you need to install these projects via the CM interface.
+
+For example, to use automation scripts developed by the
+[MLCommons task force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+and shared via GitHub, you just need to pull this repository via CM:
+
+```bash
+cm pull repo --url=https://github.com/mlcommons/cm4mlops --checkout=dev
+```
+
+or
+
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+You can now see all available CM scripts in your system as follows:
+
+```bash
+cm find script
+cm find script install* | sort
+
+```
+
+
+## Getting started with CM scripts
+
+You can run any of the above CM scripts on any platform as follows:
+```bash
+cm run script "tags separated by space" --keys=values --env.KEY=VALUE
+cm run script --tags="tags separated by comma" --keys=values --env.KEY=VALUE
+```
+or using a shortcut `cmr` available in CM V1.4.0+:
+```bash
+cmr "tags separated by space" --keys=values --env.KEY=VALUE
+```
+
+You can also use `-j` flag to print JSON output at the end of the script execution
+and `-v` flag to show extra debug information during script execution.
+
+For example, you can download a ResNet-50 model in ONNX format from Zenodo using the following script:
+```bash
+cmr "download file" --url=https://zenodo.org/record/4735647/files/resnet50_v1.onnx
+```
+
+You can also obtain info about your OS (Linux, Windows, MacOS) in a unified way and print JSON output
+as well as CM debug info as follows:
+```bash
+cmr "detect os" -j -v
+```
+
+You can turn on silent mode using CM cfg automation:
+```bash
+cm set cfg --key.script.silent
+```
+or
+```bash
+cm set cfg default --key.script.silent
+```
+
+
+## Understanding CM scripts
+
+CM scripts are treated as standard CM artifacts with the associated CM automation ["script"](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script),
+CM action ["run"](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/module.py#L73),
+and JSON and/or YAML meta descriptions.
+
+CM scripts can be invoked by using their alias, unique ID and human-readable tags (preferred method).
+
+For example, the [CM "Print Hello World" script](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world)
+simply wraps two native `run.sh` and `run.bat` scripts to print "Hello World" on Linux, MacOS or Windows
+together with a few environment variables:
+
+```bash
+ls `cm find script print-hello-world`
+
+README.md _cm.json run.bat run.sh
+```
+
+It is described by this [_cm.json meta description file](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world/_cm.json)
+with the following alias, UID and tags:
+
+```json
+{
+ "automation_alias": "script",
+ "automation_uid": "5b4e0237da074764",
+
+ "alias": "print-hello-world",
+ "uid": "b9f0acba4aca4baa",
+
+ "default_env": {
+ "CM_ENV_TEST1": "TEST1"
+ },
+
+ "env": {
+ "CM_ENV_TEST2": "TEST2"
+ },
+
+ "input_mapping": {
+ "test1": "CM_ENV_TEST1"
+ },
+
+ "new_env_keys": [
+ "CM_ENV_TEST*"
+ ],
+
+ "new_state_keys": [
+ "hello_test*"
+ ],
+
+ "tags": [
+ "print",
+ "hello-world",
+ "hello world",
+ "hello",
+ "world",
+ "native-script",
+ "native",
+ "script"
+ ]
+}
+```
+
+The `automation_alias` and `automation_uid` keys tell CM that this artifact can be used with the CM "script" automation.
+
+Therefore, this script can be executed from the command line in any of the following ways:
+
+```bash
+cm run script print-hello-world
+cm run script b9f0acba4aca4baa
+cm run script --tags=print,native-script,hello-world
+cm run script "print native-script hello-world"
+```
+
+The same script can be also executed using CM Python API as follows:
+```python
+import cmind
+
+output = cmind.access({'action':'run', 'automation':'script', 'tags':'print,native-script,hello-world'})
+if output['return']>0:
+ cmind.error(output)
+
+import json
+print (json.dumps(output, indent=2))
+```
+
+Normally you should see the following output along with some debug information (that will be removed soon):
+
+```bash
+
+...
+
+CM_ENV_TEST1 = TEST1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+...
+```
+
+### Wrapping native scripts
+
+*run.bat* and *run.sh* are native scripts that will be executed by this CM script in a unified way on Linux, MacOS and Windows:
+
+```bash
+echo ""
+echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}"
+echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}"
+
+echo ""
+echo "HELLO WORLD!"
+```
+
+The idea of using native scripts is to make it easier for researchers and engineers to reuse their existing automation scripts
+while providing a common CM wrapper with a unified CLI, Python API and extensible meta descriptions.
+
+
+
+
+### Modifying environment variables
+
+CM script automation CLI uses a flag `--env.VAR=VALUE` to set some environment variable and pass it to a native script
+as shown in this example:
+
+```bash
+cm run script "print native-script hello-world" \
+ --env.CM_ENV_TEST1=ABC1 --env.CM_ENV_TEST2=ABC2
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+```
+
+Note that *CM_ENV_TEST2* did not change. This happened because the dictionary `env` in the *_cm.json* forces *CM_ENV_TEST2* to *TEST2*,
+while the `default_env` dictionary allows environment variables to be updated externally.
+
+You can still force an environment variable to a given value externally using a `--const` flag as follows:
+
+```bash
+cm run script "print native-script hello-world" \
+ --env.CM_ENV_TEST1=ABC1 --const.CM_ENV_TEST2=ABC2
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = ABC2
+
+HELLO WORLD!
+
+```
+
+You can also use a JSON file instead of flags. Create *input.json* (or any other filename):
+```json
+{
+ "tags":"print,native-script,hello-world",
+ "env":{
+ "CM_ENV_TEST1":"ABC1"
+ }
+}
+```
+
+and run the CM script with this input file as follows:
+```
+cm run script @input.json
+```
+
+
+You can use a YAML file instead of the CLI. Create *input.yaml* (or any other filename):
+```yaml
+tags: "print,hello-world,script"
+env:
+ CM_ENV_TEST1: "ABC1"
+```
+
+and run the CM script with this input file as follows:
+```
+cm run script @input.yaml
+```
+
+Finally, you can map any other flag from the script CLI to an environment variable
+using the key `input_mapping` in the `_cm.json` meta description of this script:
+
+```bash
+cm run script "print native-script hello-world" --test1=ABC1
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+
+```
+
+
+### Understanding unified output dictionary
+
+You can see the output of a given CM script in JSON format by adding the `--out=json` flag as follows:
+
+```bash
+cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+
+{
+ "deps": [],
+ "env": {
+ "CM_ENV_TEST1": "ABC1",
+ "CM_ENV_TEST2": "TEST2"
+ },
+ "new_env": {
+ "CM_ENV_TEST1": "ABC1",
+ "CM_ENV_TEST2": "TEST2"
+ },
+ "new_state": {},
+ "return": 0,
+ "state": {}
+}
+```
+
+Note that `new_env` shows new environment variables produced and explicitly exposed by this script
+via the `new_env_keys` key in the `_cm.json` meta description of this script.
+
+This is needed to assemble automation pipelines and workflows while avoiding their contamination
+with temporary environments. A CM script must explicitly expose the environment variables that will
+go to the next stage of a pipeline.
+
+In the following example, `CM_ENV_TEST3` will be added to the `new_env` while `CM_XYZ` will not
+since it is not included in `"new_env_keys":["CM_ENV_TEST*"]`:
+
+```bash
+cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json --env.CM_ENV_TEST3=ABC3 --env.CM_XYZ=XYZ
+```
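+
+The resulting `new_env` would then look roughly like this (a sketch based on the filtering rule above):
+
+```json
+{
+ "new_env": {
+ "CM_ENV_TEST1": "ABC1",
+ "CM_ENV_TEST2": "TEST2",
+ "CM_ENV_TEST3": "ABC3"
+ }
+}
+```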
+
+### Modifying state dictionary
+
+Sometimes it is necessary to use more complex structures than environment variables in scripts and workflows.
+We use a dictionary `state` that can be updated and exposed by a given script via the `new_state_keys` key
+in the `_cm.json` meta description of this script.
+
+In the following example, the `hello_world` key will be updated in the `new_state` dictionary,
+while the `hello` key will not be updated because it is not included in the wildcard `"new_state_keys":["hello_world*"]`:
+
+```bash
+cm run script --tags=print,hello-world,script --out=json \
+ --state.hello=xyz1 --state.hello_world=xyz2
+
+...
+
+{
+ "deps": [],
+ "env": {
+ "CM_ENV_TEST1": "TEST1",
+ "CM_ENV_TEST2": "TEST2"
+ },
+ "new_env": {
+ "CM_ENV_TEST1": "TEST1",
+ "CM_ENV_TEST2": "TEST2"
+ },
+ "new_state": {
+ "hello_world": "xyz2"
+ },
+ "return": 0,
+ "state": {
+ "hello": "xyz1",
+ "hello_world": "xyz2"
+ }
+}
+```
+
+### Running CM scripts via CM Python API
+
+You can run a given CM script from python or Jupyter notebooks as follows:
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'print,hello-world,script',
+ 'const':{
+ 'CM_ENV_TEST1':'ABC1',
+ },
+ 'env':{
+ 'CM_ENV_TEST2':'ABC2'
+ },
+ 'state': {
+ 'hello':'xyz1',
+ 'hello_world':'xyz2'
+ }
+ })
+
+print (r)
+
+```
+
+```bash
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = ABC2
+
+HELLO WORLD!
+
+{'return': 0,
+ 'env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
+ 'new_env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
+ 'state': {'hello': 'xyz1', 'hello_world': 'xyz2'},
+ 'new_state': {'hello_world': 'xyz2'},
+ 'deps': []}
+
+```
+
+
+
+### Assembling pipelines (workflows) of CM scripts
+
+We've added a simple mechanism to chain reusable CM scripts into complex pipelines
+without the need for specialized workflow frameworks.
+
+Simply add a "deps" list to the `_cm.json` or `_cm.yaml` of your script as follows:
+
+```json
+
+{
+ "deps": [
+ {
+ "tags": "a string of tags separated by comma to find and execute the 1st CM script"
+ },
+ {
+ "tags": "a string of tags separated by comma to find and execute the 1st CM script"
+ },
+ ...
+ ]
+}
+
+```
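+
+The same dependencies can be expressed in `_cm.yaml` (an equivalent sketch):
+
+```yaml
+deps:
+ - tags: "a string of tags separated by comma to find and execute the 1st CM script"
+ - tags: "a string of tags separated by comma to find and execute the 2nd CM script"
+```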
+
+This CM script will run all dependent scripts in the above sequence, aggregate environment variables and the `state` dictionary,
+and will then run its native scripts.
+
+You can also turn on specific dependencies based on the values of specific environment variables or on a min/max version (if supported)
+in this pipeline as follows:
+
+```json
+
+{
+ "deps": [
+ {
+ "tags": "a string of tags separated by comma to find and execute the 1st CM script",
+ "enable_if_env": { "USE_CUDA" : ["yes", "YES", "true"] }
+ },
+ {
+ "tags": "a string of tags separated by comma to find and execute the 1st CM script"
+ "enable_if_env": { "USE_CPU" : ["yes", "YES", "true"] },
+ "version_min": "3.10"
+ },
+ ...
+ ]
+}
+
+```
+
+You can also specify dependencies to be invoked after executing native scripts
+using a `"post_deps"` list with the same format as `"deps"`.
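+
+For example (a sketch):
+
+```json
+{
+ "post_deps": [
+ {
+ "tags": "a string of tags separated by comma to find and execute a CM script after the native scripts"
+ }
+ ]
+}
+```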
+
+
+You can see an example of such dependencies in the [_cm.json](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py/_cm.json)
+of the ["print-hello-world-py" CM script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py)
+that detects and unifies OS parameters using the ["detect-os" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os),
+detects or builds Python using the ["get-python3" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
+and then runs `code.py` with "Hello World" from `run.sh` or `run.bat`:
+
+```bash
+cm run script "print python hello-world"
+```
+
+
+
+
+
+
+### Customizing CM script execution flow
+
+If a developer adds a `customize.py` file inside a given CM script,
+it can be used to programmatically update environment variables, prepare input scripts
+and even invoke other scripts programmatically using Python.
+
+If a function `preprocess` exists in this file, the CM script automation will call it before
+invoking a native script.
+
+If this function returns `{"skip":True}` in the output,
+further execution of this script will be skipped.
+
+After executing the preprocess function, the CM script automation will record the global state dictionary
+into *tmp-state.json* and the local state dictionary from this CM script into *tmp-state-new.json*.
+
+The CM script automation will then run a native script (run.sh on Linux/MacOS or run.bat on Windows)
+with all merged environment variables from previous scripts.
+
+Note that native scripts can also create two files that will be automatically picked up and processed by the CM script automation, as shown in the sketch below:
+* *tmp-run-env.out* - list of environment variables to update the "new_env" of a given CM script
+* *tmp-run-state.json* - the state dictionary to update the "new_state" of a given CM script
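+
+For example, a native `run.sh` could expose a new environment variable and new state roughly as follows (a sketch; the key names are illustrative):
+
+```bash
+#!/bin/bash
+
+# Expose a new environment variable to the CM script automation
+echo "CM_ENV_TEST3=ABC3" > tmp-run-env.out
+
+# Expose new state entries
+echo '{"hello_world_extra": "xyz"}' > tmp-run-state.json
+```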
+
+If a `postprocess` function exists in the *customize.py* file, the CM script will call it
+to finalize the postprocessing of files, environment variables, and the state dictionary.
+
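+A minimal *customize.py* could look roughly like this (a sketch; the environment keys are illustrative, and the exact structure of the input dictionary is defined by the CM script automation):
+
+```python
+def preprocess(i):
+ env = i['env']
+
+ # Skip the native script if a hypothetical flag is set
+ if env.get('CM_SKIP_RUN', '') == 'yes':
+ return {'return': 0, 'skip': True}
+
+ return {'return': 0}
+
+
+def postprocess(i):
+ env = i['env']
+
+ # Expose a new environment variable after the native script has run
+ env['CM_ENV_TEST3'] = 'ABC3'
+
+ return {'return': 0}
+```
+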
+You can see an [example of such `customize.py` module](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py) in the CM script
+to [detect or install/build Python interpreter](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3) in a unified way on any machine.
+
+This script exposes a number of environment variables for a detected Python
+in the [`postprocess` function](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py#L60):
+
+* `CM_PYTHON_BIN` - python3.10 or python.exe or any other name of a Python interpreter on a given system
+* `CM_PYTHON_BIN_PATH` - path to a detected or installed python
+* `CM_PYTHON_BIN_WITH_PATH` - full path to a detected or installed python
+* `LD_LIBRARY_PATH` - updated LD_LIBRARY_PATH to python
+* `PATH` - updated PATH to python
+
+These environment variables can be reused by other CM scripts or external tools
+while decoupling them from specific python versions and paths, and even allowing
+multiple versions of tools and artifacts to co-exist on the same system
+and plugged into CM scripts:
+
+```bash
+cm run script "get python3" --out=json
+```
+
+
+
+### Caching output of CM scripts
+
+By default, CM scripts run wrapped scripts and tools, update environment variables and produce new files in the current directory.
+
+In many cases, we want to cache the output and environment variables when we run the same CM script with the same input again
+to avoid potentially lengthy detections, downloads, builds and data pre/post processing.
+
+That's why we have developed another CM automation called ["cache"](../cache/README-extra.md)
+to cache the output of scripts in the "cache" artifacts in the "local" CM repository
+that can be found by tags or unique IDs like any other CM artifact.
+
+Our convention is to use names *get-{tool or artifact}* for CM scripts that detect already installed artifacts,
+prepare their environment and cache them in the *local* CM repository using the "cache" automation.
+
+If the installed artifact doesn't exist, we either enhance the above scripts to include downloading, installing and even building
+a given artifact (if it's a tool), or we create extra CM scripts *install-{tool or artifact}*
+that download and prepare tools and artifacts (install, build, preprocess, etc.).
+
+For example, the CM script [*get-python3*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
+has *customize.py* with *preprocess* function that implements the search for python3 on Linux
+or python.exe on Windows, 2 native scripts *run.sh* and *run.bat* to obtain the version of the detected python installation,
+and *postprocess* function to prepare environment variables *CM_PYTHON_BIN* and *CM_PYTHON_BIN_WITH_PATH*
+that can be used by other CM scripts:
+
+```bash
+cm run script "get python" --out=json
+```
+
+If you run it for the first time and the CM script detects multiple versions of python co-existing on your system,
+it will ask you to select one. CM will then cache the output in the *cache* artifact of the CM repository.
+You can see all *cache* CM entries for other tools and artifacts as follows:
+
+```bash
+cm show cache
+```
+or
+```bash
+cm show cache --tags=get,python
+```
+
+You can see the cached files as follows:
+```bash
+ls `cm find cache --tags=get,python`
+```
+
+* _cm.json - CM meta description of this "cache" artifact with its unique ID, tags and other meta information
+* cm-cached-state.json - dictionary with the new environment variables and the new state dictionary
+* tmp-env-all.sh - all environment variables used during CM script execution
+* tmp-env.sh - only new environment variables produced after CM script execution (it can be used directly by external tools)
+* tmp-run.sh - all environment variables and a call to the native script (useful for reproducibility)
+* tmp-state.json - the state before running native script - it can be loaded and used by native scripts and tools instead of using environment variables
+* tmp-ver.out - the output of the --version command parsed by `postprocess` and `detect_version` functions in `customize.py`
+
+
+If you (or another CM script) run this CM script to get the python tool a second time, CM will reuse the cached output:
+```bash
+cm run script "get python" --out=json
+```
+
+This also allows us to install multiple tool versions into different CM cache entries (python virtual environments,
+LLVM compiler, etc) and use them separately without the need to change higher-level CM scripts - these tools
+will be automatically plugged in:
+
+```bash
+cm run script "install prebuilt llvm" --version=14.0.0
+cm run script "install prebuilt llvm" --version=16.0.0
+cm run script "install src llvm"
+```
+
+
+Such an approach allows us to "probe" the user environment, detect different tools and artifacts, unify them
+and adapt complex applications to a user environment in an automatic, transparent and non-intrusive way
+as shown in the next example.
+
+
+
+
+
+
+## Assembling pipeline to compile and run image corner detection
+
+We can use a compiler automatically detected by a CM script to create simple and technology-neutral compilation and execution pipelines
+in other CM scripts.
+
+For example, we have implemented a simple [image corner detection CM script]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection )
+with [this meta description](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-corner-detection/_cm.json).
+
+It uses two other reusable CM scripts to compile a given program using a detected/installed and cached compiler via CM (such as LLVM),
+and then run it with some input image.
+
+First, let's detect an installed LLVM via CM:
+
+```bash
+cm run script "get llvm"
+```
+or install a prebuilt version on Linux, MacOS or Windows:
+```bash
+cm run script "install prebuilt llvm" --version=14.0.0
+```
+
+We can then run this CM script to compile and run image corner detection as follows:
+```bash
+cm run script "app image corner-detection" --input=`cm find script --tags=app,image,corner-detection`/computer_mouse.pgm
+```
+
+This CM script will preset environment variables for a detected/installed compiler,
+compile our C program, run it via `run.sh` (Linux/MacOS) or `run.bat` (Windows)
+and generate an output image *output_image_with_corners.pgm* in the `output` directory of this script:
+
+```bash
+ls `cm find script --tags=app,image,corner-detection`/output
+
+image-corner output_image_with_corners.pgm
+
+```
+
+Note that this directory also contains the compiled tool "image-corner" that can now be used independently of CM if necessary.
+
+
+
+
+### Customizing sub-dependencies in a pipeline
+
+When running a CM script with many sub-dependencies, as in the above example,
+we may want to specify version constraints on sub-dependencies such as LLVM.
+
+One can use the key `"names"` in the "deps" list of any CM script meta description
+to specify multiple names for a given dependency.
+
+For example, a dependency to "get compiler" in CM script "compile-program"
+has `"names":["compiler"]` as shown [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/compile-program/_cm.json#L15).
+
+We can now use the CM script flag `--add_deps_recursive.{some name}.{some key}={some value}` or
+`--adr.{some name}.{some key}={some value}` to update the dictionary of any sub-dependency
+that has `some name`.
+
+For example, we can force the use of LLVM 16.0.0 for image corner detection as follows:
+```bash
+cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version=16.0.0
+```
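+
+Since this mechanism can update any key of a sub-dependency, you can also relax the constraint
+to a minimal version instead of an exact one (a sketch):
+
+```bash
+cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version_min=14.0.0
+```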
+
+If this compiler version was not yet detected or installed by CM, CM will find the related scripts
+to either install a prebuilt version of LLVM or build it from sources.
+
+
+## Using Python virtual environments
+
+By default, CM scripts will install Python dependencies into user space.
+This can influence other existing projects and may not be desirable.
+CM can be used inside Python virtual environments without any changes,
+but a user still needs to perform some manual steps to set up such an environment.
+That's why we have developed a [CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv)
+to automate the creation of multiple Python virtual environments with different names:
+
+```bash
+cm run script "install python-venv" --name={some name}
+```
+
+CM will create a virtual environment using the default Python and save it in the CM cache.
+It is also possible to create a Python virtual environment with a minimal required version
+or a specific one on Linux and MacOS as follows:
+
+```bash
+cm run script "install python-venv" --version_min=3.8 --name=mlperf
+cm run script "install python-venv" --version=3.10.8 --name=mlperf2
+```
+
+In the latter case, CM will attempt to detect Python 3.10.8 on the system.
+If it can't detect it, CM will automatically download and build it
+using [this script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src).
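+
+You can list all virtual environments created in this way (a sketch, assuming the cache entries
+carry the tags of this script):
+
+```bash
+cm show cache --tags=install,python-venv
+```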
+
+Now, when a user runs pipelines that install Python dependencies, CM will detect
+the virtual environments in the CM cache as well as the native Python and will ask the user
+which one to use.
+
+It is possible to avoid such questions by using the flag `--adr.python.name=mlperf`.
+In that case, CM will propagate the name of the virtual environment to all sub-dependencies,
+as shown in the next example.
+
+Instead of adding this flag to every script invocation, you can specify it once
+using the `CM_SCRIPT_EXTRA_CMD` environment variable as follows:
+```bash
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+You can even specify the minimal required Python version as follows:
+```bash
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf --adr.python.version_min=3.9"
+```
+
+## Assembling pipelines with other artifacts included
+
+We can now use existing CM scripts as "LEGO" blocks to assemble more complex automation pipelines and workflows
+while automatically downloading and plugging in
+and pre-/post-processing all necessary artifacts (models, data sets, frameworks, compilers, etc)
+on any supported platform (Linux, MacOS, Windows).
+
+For example, we have implemented a simple image classification application automated by the following CM script:
+[*app-image-classification-onnx-py*]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py ).
+
+It is described by the following [`_cm.yaml`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) meta description:
+
+```yaml
+alias: app-image-classification-onnx-py
+uid: 3d5e908e472b417e
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular ML/AI applications"
+
+tags:
+- app
+- image-classification
+- onnx
+- python
+
+default_env:
+ CM_BATCH_COUNT: '1'
+ CM_BATCH_SIZE: '1'
+
+deps:
+- tags: detect,os
+- tags: get,sys-utils-cm
+- names:
+ - python
+ - python3
+ tags: get,python3
+- tags: get,cuda
+ names:
+ - cuda
+ enable_if_env:
+ USE_CUDA:
+ - yes
+- tags: get,dataset,imagenet,image-classification,original
+- tags: get,dataset-aux,imagenet-aux,image-classification
+- tags: get,ml-model,resnet50,_onnx,image-classification
+
+- tags: get,generic-python-lib,_onnxruntime
+ skip_if_env:
+ USE_CUDA:
+ - yes
+- tags: get,generic-python-lib,_onnxruntime_gpu
+ enable_if_env:
+ USE_CUDA:
+ - yes
+
+variations:
+ cuda:
+ env:
+ USE_CUDA: yes
+```
+
+
+Its `deps` pipeline runs other CM scripts to detect OS parameters, detect or install Python,
+install the latest ONNX runtime, and download the ResNet-50 model together with a minimal ImageNet dataset (500 images).
+
+It also contains [`run.sh`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.sh)
+and [`run.bat`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.bat)
+to install extra Python requirements (not yet unified by CM scripts)
+and run a Python script that classifies an image from ImageNet
+or an image provided by the user.
+
+Before running it, let us install a Python virtual environment via CM to avoid altering
+the native Python installation:
+```bash
+cm run script "install python-venv" --name=my-test
+cm show cache --tags=python
+```
+
+You can run it on any system as follows:
+
+```bash
+cm run script "python app image-classification onnx"
+```
+
+
+To avoid CM asking which Python to use, you can force the use of the Python virtual environment
+as follows:
+
+```bash
+cm run script "python app image-classification onnx" --adr.python.name=my-test
+```
+
+
+
+If you run this CM script for the first time, it may take a few minutes because it will detect, download, build and cache all dependencies.
+
+When you run it again, it will plug in all cached dependencies:
+
+```bash
+cm run script "python app image-classification onnx" --adr.python.name.my-test
+
+```
+
+You can then run it with your own image as follows:
+```bash
+cm run script --tags=app,image-classification,onnx,python \
+ --adr.python.name=my-test --input={path to my JPEG image}
+```
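+
+Based on the `variations` key in the above meta description, you can also turn on the `_cuda` variation
+to use the GPU version of the ONNX runtime (assuming a CUDA-capable system):
+
+```bash
+cm run script "python app image-classification onnx _cuda" --adr.python.name=my-test
+```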
+
+
+
+## Unifying host OS and CPU detection
+
+In order to make experiments more portable and interoperable, we need to unify
+the information about the host OS and CPU across different systems.
+We are gradually improving the following two CM scripts:
+
+* [`detect-os`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os)
+* [`detect-cpu`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu)
+
+These two CM scripts have a *customize.py* with preprocess and postprocess functions
+and native run scripts to detect the OS and CPU info and update the environment variables
+and the state dictionary needed by all other CM scripts.
+
+You can run them on your platform as follows:
+
+```bash
+cm run script "detect os" --out=json
+
+...
+
+cm run script "detect cpu" --out=json
+```
+
+If some information is missing or inconsistent across different platforms,
+you can improve it in a backwards-compatible way. You can then submit a PR [here](https://github.com/mlcommons/ck/pulls)
+to let the community reuse your knowledge and collaboratively enhance common automation scripts, pipelines and workflows -
+that's why we called our project "Collective Knowledge".
+
+
+## Detecting, installing and caching system dependencies
+
+Many projects require the installation of some system dependencies. Unfortunately, the procedure
+differs across systems.
+
+That's why we have developed two other CM scripts to unify and automate this process on any system:
+
+* [`get-sys-utils-cm`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm )
+* [`get-sys-utils-min`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min )
+
+They will install (minimal) system dependencies based on the OS and CPU info detected by the CM scripts mentioned above.
+
+The latter script is particularly useful to make applications compatible with Windows,
+where many typical tools like "wget", "patch", etc. are missing - they will be automatically
+downloaded by that script.
+
+You can use them as follows:
+```bash
+cm run script "get sys-utils-min" --out=json
+cm run script "get sys-utils-cm"
+```
+
+
+
+
+## Using variations
+
+In some cases, we want the same CM script to download a given artifact in different formats.
+
+For example, we may want to download and cache the ResNet-50 model in ONNX, PyTorch, TensorFlow or TFLite format.
+
+In such cases, we use so-called `variations` in the meta description of a given CM script.
+
+For example, the CM script [`get-ml-model-resnet50`](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50)
+has many variations and their combinations (separated by a comma) to download this model in multiple formats:
+
+* `onnx`
+* `onnx,opset-11`
+* `onnx,opset-8`
+* `pytorch`
+* `pytorch,fp32`
+* `pytorch,int8`
+* `tflite`
+* `tflite,argmax`
+* `tflite,no-argmax`
+* `tensorflow`
+* `batch_size.1`
+* `batch_size.#`
+
+These variations simply update environment variables and add more dependencies on other CM scripts
+before running `customize.py` and native scripts as described in [_cm.json]( https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-resnet50/_cm.json#L30 ).
+
+It is possible to specify a required variation or multiple variations when running a given CM script by adding extra tags with the "_" prefix.
+
+For example, you can install the quantized ResNet-50 model in the PyTorch int8 format as follows:
+
+```bash
+cm run script "get ml-model resnet50 _pytorch _int8" --out=json
+```
+
+You can also install the FP32 variation of this model at the same time:
+```bash
+cm run script "get ml-model resnet50 _pytorch _fp32" --out=json
+```
+
+You can now find them in the cache by tags and variations as follows:
+```bash
+cm show cache --tags=get,ml-model,resnet50
+cm show cache --tags=get,ml-model,resnet50,_pytorch
+cm show cache --tags=get,ml-model,resnet50,_pytorch,_fp32
+```
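+
+Note that `batch_size.#` above is a dynamic variation: the `#` is substituted with a user-provided value
+at run time (a sketch, assuming the given combination of variations is supported):
+
+```bash
+cm run script "get ml-model resnet50 _onnx _batch_size.1" --out=json
+```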
+
+
+
+
+
+
+
+
+
+
+
+## Running CM scripts inside containers
+
+One of the important ideas behind using a common automation language
+is to use it both inside and outside containers, thus avoiding the need to create
+ad-hoc manual containers and README files.
+
+We can just use base containers and let the CM automation language
+detect installed tools and connect external data with the automation pipelines and workflows.
+
+See examples of modular containers using the CM language to automate the MLPerf inference benchmark from MLCommons
+[here](https://github.com/mlcommons/ck/tree/master/docker).
+
+Note that we continue working on CM functionality to automatically generate
+Docker containers and README files when executing CM scripts
+(a prototype was successfully validated in the MLPerf inference v3.0 submission):
+
+* https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile
+* https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image
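+
+These capabilities are also exposed via the `dockerfile` and `docker` actions of the script automation
+(documented in the auto-generated README of this automation); for example:
+
+```bash
+cm dockerfile script "python app image-classification onnx"
+cm docker script "python app image-classification onnx"
+```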
+
+
+
+
+## Getting help about other script automation flags
+
+You can get help about all flags used to customize execution
+of a given CM script from the command line as follows:
+
+```bash
+cm run script --help
+```
+
+Some flags make it easier to debug scripts and save their output to files.
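+
+For example, the following flags (documented in the `run` API of this automation) print the script
+execution time and dump reproducibility files into the `cm-repro` directory:
+
+```bash
+cm run script "detect os" --out=json --time --repro
+```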
+
+You can find more info about CM script execution flow in this [document](README-specs.md).
+
+
+
+
+
+
+
+
+
+
+
+
+## Further reading
+
+* [CM "script" automation specification](README-specs.md)
+* [MLCommons CM script sources](https://github.com/mlcommons/cm4mlops/tree/main/script)
+* [List of portable and reusable CM scripts from MLCommons](https://access.cknowledge.org/playground/?action=scripts)
+* [CM "cache" automation](../cache/README-extra.md)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md b/cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md
new file mode 100644
index 0000000000..4b40feeba2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md
@@ -0,0 +1,79 @@
+# CM "script" automation specification
+
+Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm)
+for more details about the CM automation language.
+
+See the CM script introduction [here](README-extra.md).
+
+See the [automatically generated catalog](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) of all CM scripts from MLCommons.
+
+## Getting started with CM scripts
+
+* A CM script is identified by a set of tags and by a unique ID.
+* Furthermore, each CM script can have multiple variations; they are identified by variation tags, which are treated in the same way as tags but carry a `_` prefix.
+
+### CM script execution flow
+* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first to check for any `deps` scripts; if there are any, they are executed in order.
+* Once all the `deps` scripts are executed, the `customize.py` file is checked and the `preprocess` function inside it is executed, if present.
+* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* After this, the keys in the `env` dictionary are exported as environment variables and the `run` file, if it exists, is executed.
+* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* Then the `postprocess` function inside `customize.py` is executed, if present.
+* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed.
+
+**Note:** if a script is already cached, then the `preprocess`, run file and `postprocess` executions won't happen; only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`.
+
+### Input flags
+When we run a CM script, we can also pass inputs to it; any input listed in the `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding environment variable.
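+
+For example, if a script's `_cm.json` contained a hypothetical entry `"input_mapping": {"count": "CM_COUNT"}`,
+then the following call would set `CM_COUNT=5` for `customize.py` and the native scripts:
+
+```bash
+cm run script "get myscript" --count=5
+```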
+
+### Conditional execution of any `deps`, `post_deps`
+We can use the `skip_if_env` (or `enable_if_env`) dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
+
+### Versions
+We can request a specific version of a script using `version`; `version_min` and `version_max` are also possible options.
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and above `version_min`) will be used for installation; otherwise `version_min` will be used as `version`.
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and below `version_max`) will be used for installation; otherwise `version_max_usable` (an additional input needed with `version_max`) will be used as `version`.
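+
+For example (a sketch using the LLVM detection script from the script README):
+
+```bash
+cm run script "get llvm" --version_min=14.0.0
+cm run script "get llvm" --version=16.0.0
+```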
+
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies, specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has the tags `"get,myscript"`, then to call the variation `"test"` inside it we have to use the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key that maps variations into a group; at any time, only one variation from a group can be used in the variation tags. For example, `cpu` and `cuda` can be two variations under the `device` group, but a user can at any time use either `cpu` or `cuda` as a variation tag, not both.
+
+#### Dynamic variations
+Sometimes it is difficult to enumerate all the variations needed for a script - say `batch_size`, which can take many different values. To handle this case, we support dynamic variations using `#`, where `#` can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
+
+### ENV flow during CM script execution
+* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382)
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (including all the dependent script executions), newly created keys and any updated keys are merged with `saved_env`, provided the keys are mentioned in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
+
+#### Special env keys
+* Any env key with the prefix `CM_TMP_*` or `CM_GIT_*` is not passed by default to any dependency. Such keys can be force-passed by adding them to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent any env key from being passed to a given dependency by adding the key's prefix to the `clean_env_keys` list of the concerned dependency.
+* `--input` is automatically converted to the `CM_INPUT` env key.
+* `version` is converted to `CM_VERSION`, `version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`.
+* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set, then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
+* If `env['CM_GIT_SSH']=yes`, then git URLs are changed from HTTPS to SSH.
+
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
+
+### How does the cache work?
+* If `cache=true` is set in a script meta, the result of the script execution is cached for further use.
+* For a cached script, `env` and `state` updates are done using the `new_env` and `new_state` dictionaries, which are stored in the `cm-cached-state.json` file inside the cached folder.
+* By using the `--new` input, a new cache entry can be forced even when an old one exists.
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
+
+### Updating ENV from inside the run script
+* [TBD]
+
+
+### Script workflow (env, deps, native scripts)
+
+![CM script workflow](assets/scripts-workflow.png)
+
+
+
+© 2022-24 [MLCommons](https://mlcommons.org)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/README.md b/cmx4mlops/cmx4mlops/repo/automation/script/README.md
new file mode 100644
index 0000000000..d4a4c62bc7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/README.md
@@ -0,0 +1,427 @@
+*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
+
+### Automation actions
+
+#### run
+
+ * CM CLI: ```cm run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77))
+ * CM CLI with UID: ```cm run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'run',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### version
+
+ * CM CLI: ```cm version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199))
+ * CM CLI with UID: ```cm version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'version',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### search
+
+ * CM CLI: ```cm search script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227))
+ * CM CLI with UID: ```cm search script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'search',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### test
+
+ * CM CLI: ```cm test script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346))
+ * CM CLI with UID: ```cm test script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### native_run
+
+ * CM CLI: ```cm native_run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412))
+ * CM CLI with UID: ```cm native_run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'native_run',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### add
+
+ * CM CLI: ```cm add script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485))
+ * CM CLI with UID: ```cm add script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'add',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### run_native_script
+
+ * CM CLI: ```cm run_native_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270))
+ * CM CLI with UID: ```cm run_native_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'run_native_script',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### find_file_in_paths
+
+ * CM CLI: ```cm find_file_in_paths script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314))
+ * CM CLI with UID: ```cm find_file_in_paths script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'find_file_in_paths',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### detect_version_using_script
+
+ * CM CLI: ```cm detect_version_using_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533))
+ * CM CLI with UID: ```cm detect_version_using_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'detect_version_using_script',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### find_artifact
+
+ * CM CLI: ```cm find_artifact script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606))
+ * CM CLI with UID: ```cm find_artifact script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'find_artifact',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### find_file_deep
+
+ * CM CLI: ```cm find_file_deep script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764))
+ * CM CLI with UID: ```cm find_file_deep script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'find_file_deep',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### find_file_back
+
+ * CM CLI: ```cm find_file_back script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822))
+ * CM CLI with UID: ```cm find_file_back script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'find_file_back',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### parse_version
+
+ * CM CLI: ```cm parse_version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863))
+ * CM CLI with UID: ```cm parse_version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'parse_version',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### update_deps
+
+ * CM CLI: ```cm update_deps script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917))
+ * CM CLI with UID: ```cm update_deps script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'update_deps',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### get_default_path_list
+
+ * CM CLI: ```cm get_default_path_list script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937))
+ * CM CLI with UID: ```cm get_default_path_list script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'get_default_path_list',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### doc
+
+ * CM CLI: ```cm doc script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948))
+ * CM CLI with UID: ```cm doc script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'doc',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### gui
+
+ * CM CLI: ```cm gui script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976))
+ * CM CLI with UID: ```cm gui script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'gui',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### dockerfile
+
+ * CM CLI: ```cm dockerfile script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013))
+ * CM CLI with UID: ```cm dockerfile script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'dockerfile',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### docker
+
+ * CM CLI: ```cm docker script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041))
+ * CM CLI with UID: ```cm docker script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'docker',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### prepare
+
+ * CM CLI: ```cm prepare script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095))
+ * CM CLI with UID: ```cm prepare script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'prepare',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### clean_some_tmp_files
+
+ * CM CLI: ```cm clean_some_tmp_files script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106))
+ * CM CLI with UID: ```cm clean_some_tmp_files script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'clean_some_tmp_files',
+ 'automation':'script,5b4e0237da074764',
+ 'out':'con',
+ ```
+ [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/script/_cm.json
new file mode 100644
index 0000000000..c1419f8f8f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/_cm.json
@@ -0,0 +1,18 @@
+{
+ "alias": "script",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "min_cm_version": "2.2.0",
+ "deps": {
+ "cache": "cache,541d6f712a6b464e"
+ },
+ "desc": "Making native scripts more portable, interoperable and deterministic",
+ "developers": "Arjun Suresh and Grigori Fursin",
+ "actions_with_help":["run", "docker"],
+ "prototype": true,
+ "sort": 1000,
+ "tags": [
+ "automation"
+ ],
+ "uid": "5b4e0237da074764"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png b/cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png
new file mode 100644
index 0000000000..60d0ef7157
Binary files /dev/null and b/cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png differ
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile
new file mode 100644
index 0000000000..a93507dc26
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile
@@ -0,0 +1,34 @@
+FROM ubuntu:23.04
+
+# Automatically generated by the CM workflow automation meta-framework
+# https://github.com/mlcommons/ck
+
+LABEL github=""
+LABEL maintainer=""
+LABEL license=""
+
+SHELL ["/bin/bash", "-c"]
+
+ARG CM_GH_TOKEN
+ARG CM_ADD_DOCKER_GROUP_ID=""
+
+# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
+# Install system dependencies
+RUN apt-get update -y
+RUN apt-get install -y python3 python3-pip git sudo wget
+
+# Setup docker environment
+ENTRYPOINT ["/bin/bash", "-c"]
+ENV TZ="US/Pacific"
+ENV PATH="${PATH}:/home/cmuser/.local/bin"
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
+
+# Setup docker user
+RUN groupadd ${CM_ADD_DOCKER_GROUP_ID} cm
+RUN useradd -g cm --create-home --shell /bin/bash cmuser
+RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+USER cmuser:cm
+WORKDIR /home/cmuser
+
+# Install python packages
+RUN python3 -m pip install --user cmind requests giturlparse tabulate --break-system-packages
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat
new file mode 100644
index 0000000000..bd4ea665d8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat
@@ -0,0 +1 @@
+docker build -f "ubuntu-23.04.Dockerfile" -t "cknowledge/test-cm-script:ubuntu-23.04-cm-dev" .
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh
new file mode 100644
index 0000000000..92ec69ba1f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker build -f "ubuntu-23.04.Dockerfile" -t "cknowledge/test-cm-script:ubuntu-23.04-cm-dev" .
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat
new file mode 100644
index 0000000000..c7c3fd1989
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat
@@ -0,0 +1 @@
+docker run -it --entrypoint "" cknowledge/test-cm-script:ubuntu-23.04-cm-dev bash
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh
new file mode 100644
index 0000000000..69425443a0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker run -it --entrypoint "" cknowledge/test-cm-script:ubuntu-23.04-cm-dev bash
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/module.py b/cmx4mlops/cmx4mlops/repo/automation/script/module.py
new file mode 100644
index 0000000000..b08875892d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/module.py
@@ -0,0 +1,6587 @@
+#
+# CM "script" automation helps users to encode their MLOps, DevOps and other knowledge
+# as portable and reusable automation recipes with simple tags, native scripts
+# and a unified CLI, Python API and JSON/YAML meta descriptions.
+#
+# This is a stable prototype of the CM script automation being developed by Grigori Fursin and Arjun Suresh
+#
+# TBD: when we have bandwidth and resources, we should refactor it
+# and make it cleaner and simpler while keeping full backwards compatibility.
+#
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+import logging
+
+from cmind.automation import Automation
+from cmind import utils
+from cmind import __version__ as current_cm_version
+
+
+class CAutomation(Automation):
+ """
+ CM "script" automation actions
+ (making native scripts more portable, deterministic, reusable and reproducible)
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+ logging.basicConfig(level=logging.INFO)
+ self.os_info = {}
+ self.run_state = {}
+ self.run_state['deps'] = []
+ self.run_state['fake_deps'] = False
+ self.run_state['parent'] = None
+ self.run_state['version_info'] = []
+
+ self.file_with_cached_state = 'cm-cached-state.json'
+
+ self.tmp_file_env = 'tmp-env'
+ self.tmp_file_env_all = 'tmp-env-all'
+ self.tmp_file_run = 'tmp-run'
+ self.tmp_file_state = 'tmp-state.json'
+
+ self.tmp_file_run_state = 'tmp-run-state.json'
+ self.tmp_file_run_env = 'tmp-run-env.out'
+ self.tmp_file_ver = 'tmp-ver.out'
+
+ self.__version__ = "1.3.2"
+
+ self.local_env_keys = ['CM_VERSION',
+ 'CM_VERSION_MIN',
+ 'CM_VERSION_MAX',
+ 'CM_VERSION_MAX_USABLE',
+ 'CM_DETECTED_VERSION',
+ 'CM_INPUT',
+ 'CM_OUTPUT',
+ 'CM_OUTBASENAME',
+ 'CM_OUTDIRNAME',
+ 'CM_NAME',
+ 'CM_EXTRA_CACHE_TAGS',
+ 'CM_TMP_*',
+ 'CM_GIT_*',
+ 'CM_RENEW_CACHE_ENTRY']
+
+ self.input_flags_converted_to_tmp_env = ['path']
+
+ self.input_flags_converted_to_env = ['input',
+ 'output',
+ 'outdirname',
+ 'outbasename',
+ 'name',
+ 'extra_cache_tags',
+ 'skip_compile',
+ 'skip_run',
+ 'accept_license',
+ 'skip_system_deps',
+ 'git_ssh',
+ 'gh_token']
+
+ ############################################################
+
+ def run(self, i):
+ """
+ Run CM script
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (artifact) (str): specify CM script (CM artifact) explicitly
+
+ (tags) (str): tags to find a CM script (CM artifact)
+
+ (env) (dict): global environment variables (can/will be updated by a given script and dependencies)
+ (const) (dict): constant environment variable (will be preserved and persistent for a given script and dependencies)
+
+ (state) (dict): global state dictionary (can/will be updated by a given script and dependencies)
+ (const_state) (dict): constant state (will be preserved and persistent for a given script and dependencies)
+
+ (add_deps) (dict): {"name": {"tag": "tag(s)"}, "name": {"version": "version_no"}, ...}
+ (add_deps_recursive) (dict): same as add_deps but is passed recursively onto dependencies as well
+
+ (version) (str): version to be added to env.CM_VERSION to specialize this flow
+ (version_min) (str): min version to be added to env.CM_VERSION_MIN to specialize this flow
+ (version_max) (str): max version to be added to env.CM_VERSION_MAX to specialize this flow
+ (version_max_usable) (str): max USABLE version to be added to env.CM_VERSION_MAX_USABLE
+
+ (path) (str): list of paths to be added to env.CM_TMP_PATH to specialize this flow
+
+ (input) (str): converted to env.CM_INPUT (local env)
+ (output) (str): converted to env.CM_OUTPUT (local env)
+
+ (outbasename) (str): converted to env.CM_OUTBASENAME (local env)
+ (outdirname) (str): converted to env.CM_OUTDIRNAME (local env)
+
+ (extra_cache_tags) (str): converted to env.CM_EXTRA_CACHE_TAGS and used to add to caching (local env)
+
+ (name) (str): taken from env.CM_NAME and/or converted to env.CM_NAME (local env)
+ Added to extra_cache_tags with the "name-" prefix.
+ Useful for python virtual env (to create multiple entries)
+
+ (quiet) (bool): if True, set env.CM_QUIET to "yes" and attempt to skip questions
+ (the developers have to support it in pre/post processing and scripts)
+
+ (skip_cache) (bool): if True, skip caching and run in current directory
+ (force_cache) (bool): if True, force caching if can_force_cache=true in script meta
+
+ (skip_remembered_selections) (bool): if True, skip remembered selections
+ (uses or sets env.CM_TMP_SKIP_REMEMBERED_SELECTIONS to "yes")
+
+ (new) (bool): if True, skip search for cached and run again
+ (renew) (bool): if True, rewrite cache entry if exists
+
+ (dirty) (bool): if True, do not clean files
+
+ (save_env) (bool): if True, save env and state to tmp-env.sh/bat and tmp-state.json
+ (shell) (bool): if True, save env with cmd/bash and run it
+
+ (recursion) (bool): True if recursive call.
+ Useful when preparing the global bat file or Docker container
+ to save/run it in the end.
+
+ (recursion_spaces) (str, internal): adding ' ' during recursion for debugging
+
+ (remembered_selections) (list): remember selections of cached outputs
+
+ (print_env) (bool): if True, print aggregated env before each run of a native script
+
+ (fake_run) (bool): if True, will run the dependent scripts but will skip the main run script
+ (prepare) (bool): the same as fake_run
+ (fake_deps) (bool): if True, will fake run the dependent scripts
+ (run_state) (dict): Internal run state
+
+ (debug_script_tags) (str): if !='', run cmd/bash before executing a native command
+ inside a script specified by these tags
+
+ (debug_script) (bool): if True, debug current script (set debug_script_tags to the tags of a current script)
+ (debug_uid) (str): if True, set CM_TMP_DEBUG_UID to this number to enable
+ remote python debugging of scripts and wrapped apps/tools
+ (detected_versions) (dict): All the used scripts and their detected_versions
+
+ (verbose) (bool): if True, prints all tech. info about script execution (False by default)
+ (v) (bool): the same as verbose
+
+ (time) (bool): if True, print script execution time (or if verbose == True)
+ (space) (bool): if True, print used disk space for this script (or if verbose == True)
+
+ (ignore_script_error) (bool): if True, ignore error code in native tools and scripts
+ and finish a given CM script. Useful to test/debug partial installations
+
+ (json) (bool): if True, print output as JSON
+ (j) (bool): if True, print output as JSON
+
+ (pause) (bool): if True, pause at the end of the main script (Press Enter to continue)
+
+ (repro) (bool): if True, dump cm-run-script-input.json, cm-run_script_output.json,
+ cm-run-script-state.json, cm-run-script-info.json
+ to improve the reproducibility of results
+
+ (repro_prefix) (str): if !='', use it to record the above files as {repro_prefix}-input.json ...
+ (repro_dir) (str): if !='', use this directory to dump info (default = 'cm-repro')
+
+ (dump_version_info) (bool): dump info about resolved versions of tools in dependencies
+
+ (print_deps) (bool): if True, will print the CM run commands of the direct dependent scripts
+
+ (print_readme) (bool): if True, will print README with all CM steps (deps) to run a given script
+
+ (script_call_prefix) (str): how to call script in logs and READMEs (cm run script)
+
+ (skip_sys_utils) (bool): if True, set env['CM_SKIP_SYS_UTILS']='yes'
+ to skip CM sys installation
+ (skip_sudo) (bool): if True, set env['CM_TMP_SKIP_SUDO']='yes'
+ to let scripts deal with that
+
+ (silent) (bool): if True, attempt to suppress all info if supported
+ (sets CM_TMP_SILENT=yes)
+ (s) (bool): the same as 'silent'
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * (skipped) (bool): if true, this script was skipped
+
+ * new_env (dict): new environment (delta from a collective script)
+ * new_state (dict): new state (delta from a collective script)
+
+ * env (dict): global env (updated by this script - includes new_env)
+ * state (dict): global state (updated by this script - includes new_state)
+
+ """
+
+ r = self._run(i)
+
+ return r
+
+ ############################################################
+
+ def _run(self, i):
+
+ from cmind import utils
+ import copy
+ import time
+ import shutil
+
+ # Check if save input/output to file
+ repro = i.get('repro', False)
+ repro_prefix = ''
+
+ if repro:
+ repro_prefix = i.get('repro_prefix', '')
+ if repro_prefix == '':
+ repro_prefix = 'cm-run-script'
+
+ repro_dir = i.get('repro_dir', '')
+ if repro_dir == '':
+ repro_dir = os.path.join(os.getcwd(), 'cm-repro')
+ if not os.path.isdir(repro_dir):
+ os.makedirs(repro_dir)
+
+ repro_prefix = os.path.join(repro_dir, repro_prefix)
+
+ if repro_prefix != '':
+ dump_repro_start(repro_prefix, i)
+
+ recursion = i.get('recursion', False)
+
+ # If first script run, check if can write to current directory
+ if not recursion and not i.get('skip_write_test', False):
+ if not can_write_to_current_directory():
+ return {
+ 'return': 1, 'error': 'Current directory "{}" is not writable - please change it'.format(os.getcwd())}
+
+ # Check if has default config
+ r = self.cmind.access({'action': 'load',
+ 'automation': 'cfg,88dce9c160324c5d',
+ 'artifact': 'default'})
+ if r['return'] == 0:
+ config = r['config']
+
+ script_input = config.get('script', {})
+
+ if len(script_input) > 0:
+ utils.merge_dicts({'dict1': i, 'dict2': script_input})
+
+ recursion_int = int(i.get('recursion_int', 0)) + 1
+
+ start_time = time.time()
+
+ # Check extra input from environment variable CM_SCRIPT_EXTRA_CMD
+ # Useful to set up default flags such as the name of the virtual environment
+ extra_cli = os.environ.get('CM_SCRIPT_EXTRA_CMD', '').strip()
+ if extra_cli != '':
+ from cmind import cli
+ r = cli.parse(extra_cli)
+ if r['return'] > 0:
+ return r
+
+ cm_input = r['cm_input']
+
+ utils.merge_dicts({'dict1': i,
+ 'dict2': cm_input,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ # Check simplified CMD: cm run script "get compiler"
+ # If artifact has spaces, treat them as tags!
+ artifact = i.get('artifact', '')
+ if ' ' in artifact: # or ',' in artifact:
+ del (i['artifact'])
+ if 'parsed_artifact' in i:
+ del (i['parsed_artifact'])
+ # Force substitute tags
+ i['tags'] = artifact.replace(' ', ',')
+
+ # Check if has extra tags as a second artifact
+ # Example: cmr . "_python _tiny"
+
+ parsed_artifacts = i.get('parsed_artifacts', [])
+ if len(parsed_artifacts) > 0:
+ extra_tags = parsed_artifacts[0][0][0]
+ if ' ' in extra_tags or ',' in extra_tags:
+ # Add tags
+ x = i.get('tags', '')
+ if x != '':
+ x += ','
+ i['tags'] = x + extra_tags.replace(' ', ',')
+
+ # Recursion spaces needed to format log and print
+ recursion_spaces = i.get('recursion_spaces', '')
+ # Caching selections to avoid asking users again
+ remembered_selections = i.get('remembered_selections', [])
+
+ # Get current env and state before running this script and sub-scripts
+ env = i.get('env', {})
+ state = i.get('state', {})
+ const = i.get('const', {})
+ const_state = i.get('const_state', {})
+
+ # Save current env and state to detect new env and state after running
+ # a given script
+ saved_env = copy.deepcopy(env)
+ saved_state = copy.deepcopy(state)
+
+ for key in ["env", "state", "const", "const_state"]:
+ if i.get("local_" + key):
+ if not i.get(key, {}):
+ i[key] = {}
+ utils.merge_dicts({'dict1': i[key],
+ 'dict2': i['local_' + key],
+ 'append_lists': True,
+ 'append_unique': True})
+
+ add_deps = i.get('ad', {})
+ if not add_deps:
+ add_deps = i.get('add_deps', {})
+ else:
+ utils.merge_dicts({'dict1': add_deps, 'dict2': i.get(
+ 'add_deps', {}), 'append_lists': True, 'append_unique': True})
+
+ add_deps_recursive = i.get('adr', {})
+ if not add_deps_recursive:
+ add_deps_recursive = i.get('add_deps_recursive', {})
+ else:
+ utils.merge_dicts({'dict1': add_deps_recursive, 'dict2': i.get(
+ 'add_deps_recursive', {}), 'append_lists': True, 'append_unique': True})
+
+ save_env = i.get('save_env', False)
+
+ print_env = i.get('print_env', False)
+
+ show_time = i.get('time', False)
+ show_space = i.get('space', False)
+
+ if not recursion and show_space:
+ start_disk_stats = shutil.disk_usage("/")
+
+ extra_recursion_spaces = ' ' # if verbose else ''
+
+ skip_cache = i.get('skip_cache', False)
+ force_cache = i.get('force_cache', False)
+
+ # 'prepare' is a synonym for 'fake_run' (see the docstring above)
+ fake_run = i.get('fake_run', i.get('prepare', False))
+ if fake_run:
+ env['CM_TMP_FAKE_RUN'] = 'yes'
+
+ debug_uid = i.get('debug_uid', '')
+ if debug_uid != '':
+ r = _update_env(env, 'CM_TMP_DEBUG_UID', debug_uid)
+ if r['return'] > 0:
+ return r
+
+ fake_deps = i.get('fake_deps', False)
+ if fake_deps:
+ env['CM_TMP_FAKE_DEPS'] = 'yes'
+
+ if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']:
+ env['CM_SKIP_SYS_UTILS'] = 'yes'
+ if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']:
+ env['CM_TMP_SKIP_SUDO'] = 'yes'
+
+ run_state = i.get('run_state', self.run_state)
+ if not run_state.get('version_info', []):
+ run_state['version_info'] = []
+ if run_state.get('parent', '') == '':
+ run_state['parent'] = None
+ if fake_deps:
+ run_state['fake_deps'] = True
+
+ # Check verbose and silent
+ verbose = False
+
+ silent = True if str(i.get('silent', '')).lower() in [
+ 'true', 'yes', 'on'] else False
+
+ if not silent:
+ silent = True if str(i.get('s', '')).lower() in [
+ 'true', 'yes', 'on'] else False
+
+ if silent:
+ if 'verbose' in i:
+ del (i['verbose'])
+ if 'v' in i:
+ del (i['v'])
+ env['CM_TMP_SILENT'] = 'yes'
+ run_state['tmp_silent'] = True
+
+ if 'verbose' in i:
+ verbose = i['verbose']
+ elif 'v' in i:
+ verbose = i['v']
+
+ if verbose:
+ env['CM_VERBOSE'] = 'yes'
+ run_state['tmp_verbose'] = True
+ logging.getLogger().setLevel(logging.DEBUG)
+
+ print_deps = i.get('print_deps', False)
+ print_versions = i.get('print_versions', False)
+ print_readme = i.get('print_readme', False)
+ dump_version_info = i.get('dump_version_info', False)
+
+ new_cache_entry = i.get('new', False)
+ renew = i.get('renew', False)
+
+ cmd = i.get('cmd', '')
+ # Capturing the input command if it is coming from an access function
+ if not cmd and 'cmd' in i.get('input', ''):
+ i['cmd'] = i['input']['cmd']
+ cmd = i['cmd']
+
+ debug_script_tags = i.get('debug_script_tags', '')
+
+ detected_versions = i.get('detected_versions', {})
+
+ ignore_script_error = i.get('ignore_script_error', False)
+
+ # Detect current path and record in env for further use in native
+ # scripts
+ current_path = os.path.abspath(os.getcwd())
+ r = _update_env(env, 'CM_TMP_CURRENT_PATH', current_path)
+ if r['return'] > 0:
+ return r
+
+ # Check if quiet mode
+ quiet = i.get(
+ 'quiet',
+ False) if 'quiet' in i else (
+ env.get(
+ 'CM_QUIET',
+ '').lower() == 'yes')
+ if quiet:
+ env['CM_QUIET'] = 'yes'
+
+ skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \
+ else (env.get('CM_SKIP_REMEMBERED_SELECTIONS', '').lower() == 'yes')
+ if skip_remembered_selections:
+ env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes'
+
+ # Prepare debug info
+ parsed_script = i.get('parsed_artifact')
+ parsed_script_alias = parsed_script[0][0] if parsed_script is not None else ''
+
+ # Get and cache minimal host OS info to be able to run scripts and
+ # manage OS environment
+ if len(self.os_info) == 0:
+ r = self.cmind.access({'action': 'get_host_os_info',
+ 'automation': 'utils,dc2743f8450541e3'})
+ if r['return'] > 0:
+ return r
+
+ self.os_info = r['info']
+
+ os_info = self.os_info
+
+ # Bat extension for this host OS
+ bat_ext = os_info['bat_ext']
+
+ # Add permanent env from OS (such as CM_WINDOWS:"yes" on Windows)
+ env_from_os_info = os_info.get('env', {})
+ if len(env_from_os_info) > 0:
+ env.update(env_from_os_info)
+
+ # take some env from the user environment
+ keys = [
+ "GH_TOKEN",
+ "ftp_proxy",
+ "FTP_PROXY",
+ "http_proxy",
+ "HTTP_PROXY",
+ "https_proxy",
+ "HTTPS_PROXY",
+ "no_proxy",
+ "NO_PROXY",
+ "socks_proxy",
+ "SOCKS_PROXY"]
+ for key in keys:
+ if os.environ.get(key, '') != '' and env.get(key, '') == '':
+ env[key] = os.environ[key]
+
+ # Check path/input/output in input and pass to env
+ for key in self.input_flags_converted_to_tmp_env:
+ value = i.get(key, '').strip()
+ if value != '':
+ env['CM_TMP_' + key.upper()] = value
+
+ for key in self.input_flags_converted_to_env:
+ value = i.get(key, '')
+ if isinstance(value, str):
+ value = value.strip()
+ if value:
+ env[f"CM_{key.upper()}"] = value
+
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ #######################################################################
+ # Check if we want to skip cache (either by skip_cache or by fake_run)
+ force_skip_cache = True if skip_cache else False
+ force_skip_cache = True if fake_run else force_skip_cache
+
+ #######################################################################
+ # Find CM script(s) based on their tags and variations to get their meta and customize this workflow.
+ # We will need to decide how to select if more than 1 (such as "get compiler")
+ #
+ # Note: this local search function will separate tags and variations
+ #
+ # STEP 100 Input: Search scripts by i['tags'] (includes variations starting from _) and/or i['parsed_artifact']
+ # tags_string = i['tags']
+
+ tags_string = i.get('tags', '').strip()
+
+ ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'])
+
+ ii['tags'] = tags_string
+ ii['out'] = None
+
+ # If "cm run script" is called without tags/artifact but with --help
+ if len(ii.get('parsed_artifact', [])) == 0 and ii.get(
+ 'tags', '') == '' and i.get('help', False):
+ return utils.call_internal_module(
+ self, __file__, 'module_help', 'print_help', {'meta': {}, 'path': ''})
+
+ r = self.search(ii)
+ if r['return'] > 0:
+ return r
+
+ # Search function will return
+
+ list_of_found_scripts = r['list']
+
+ script_tags = r['script_tags']
+ script_tags_string = ','.join(script_tags)
+
+ variation_tags = r['variation_tags']
+
+# # Print what was searched!
+# cm_script_info = 'CM script'
+#
+# x = 'with'
+# if parsed_script_alias !='' :
+# cm_script_info += ' '+x+' alias "{}"'.format(parsed_script_alias)
+# x = 'and'
+#
+# if len(script_tags)>0:
+# cm_script_info += ' '+x+' tags "{}"'.format(script_tags_string.replace(',',' '))
+# x = 'and'
+#
+# if len(variation_tags)>0:
+# x_variation_tags = ['_'+v for v in variation_tags]
+# cm_script_info += ' '+x+' variations "{}"'.format(" ".join(x_variation_tags))
+#
+# if verbose:
+# logging.info('')
+# logging.info(recursion_spaces + '* Searching for ' + cm_script_info)
+# else:
+# logging.info(recursion_spaces + '* Running ' + cm_script_info)
+
+ cm_script_info = i.get('script_call_prefix', '').strip()
+ if cm_script_info == '':
+ cm_script_info = 'cm run script'
+ if not cm_script_info.endswith(' '):
+ cm_script_info += ' '
+
+ x = '"'
+ y = ' '
+ if parsed_script_alias != '':
+ cm_script_info += parsed_script_alias
+ x = ' --tags="'
+ y = ','
+
+ if len(script_tags) > 0 or len(variation_tags) > 0:
+ cm_script_info += x
+
+ if len(script_tags) > 0:
+ cm_script_info += script_tags_string.replace(',', y)
+
+ if len(variation_tags) > 0:
+ if len(script_tags) > 0:
+ cm_script_info += ' '
+
+ x_variation_tags = ['_' + v for v in variation_tags]
+ cm_script_info += y.join(x_variation_tags)
+
+ cm_script_info += '"'
+
+# if verbose:
+# logging.info('')
+
+ if not run_state.get('tmp_silent', False):
+ logging.info(recursion_spaces + '* ' + cm_script_info)
+
+ #######################################################################
+ # Report if scripts were not found or there is an ambiguity with UIDs
+ if not r['found_scripts']:
+ return {
+ 'return': 1, 'error': 'no scripts were found with the above tags (when variations are ignored)'}
+
+ if len(list_of_found_scripts) == 0:
+ return {
+ 'return': 16, 'error': 'no scripts were found with the above tags and variations\n' + r.get('warning', '')}
+
+ # Sometimes there is an ambiguity when someone adds a script
+ # while duplicating a UID. In such case, we will return >1 script
+ # and will start searching in the cache ...
+ # We are detecting such cases here:
+ if len(list_of_found_scripts) > 1 and script_tags_string == '' and parsed_script_alias != '' and '?' not in parsed_script_alias and '*' not in parsed_script_alias:
+ x = 'Ambiguity: the following scripts have the same UID - please change that in _cm.json or _cm.yaml:\n'
+ for y in list_of_found_scripts:
+ x += ' * ' + y.path + '\n'
+
+ return {'return': 1, 'error': x}
+
+ # STEP 100 Output: list_of_found_scripts based on tags (with variations) and/or parsed_artifact
+ # script_tags [] - contains tags without variations (starting from _ such as _cuda)
+ # variation_tags [] - contains only variations tags (without _)
+ # script_tags_string [str] (joined script_tags)
+
+ #######################################################################
+ # Sort scripts for better determinism
+ list_of_found_scripts = sorted(list_of_found_scripts, key=lambda a: (a.meta.get('sort', 0),
+ a.path))
+ logging.debug(recursion_spaces +
+ ' - Number of scripts found: {}'.format(len(list_of_found_scripts)))
+
+ # Check if script selection is remembered
+ if not skip_remembered_selections and len(list_of_found_scripts) > 1:
+ for selection in remembered_selections:
+ if selection['type'] == 'script' and set(
+ selection['tags'].split(',')) == set(script_tags_string.split(',')):
+ # Leave 1 entry in the found list
+ list_of_found_scripts = [selection['cached_script']]
+ logging.debug(
+ recursion_spaces +
+ ' - Found remembered selection with tags: {}'.format(script_tags_string))
+ break
+
+ # STEP 200 Output: potentially pruned list_of_found_scripts if
+ # selection of multiple scripts was remembered
+
+ # STEP 300: If more than one CM script found (example: "get compiler"),
+ # first, check if selection was already remembered!
+ # second, check in cache to prune scripts
+
+ # STEP 300 input: list_of_found_scripts
+
+ select_script = 0
+
+ # If 1 script is found and script_tags == '', pick its tags from the meta
+ if script_tags_string == '' and len(list_of_found_scripts) == 1:
+ script_tags_string = ','.join(
+ list_of_found_scripts[0].meta.get('tags', []))
+
+ # Found 1 or more scripts. Scan their meta to find at least 1 with
+ # cache==True
+ preload_cached_scripts = False
+ for script in list_of_found_scripts:
+ if script.meta.get('cache', False) == True or (
+ script.meta.get('can_force_cache', False) and force_cache):
+ preload_cached_scripts = True
+ break
+
+ # STEP 300 Output: preload_cached_scripts = True if at least one of the
+ # list_of_found_scripts must be cached
+
+ # STEP 400: If not force_skip_cache and at least one script can be cached, find (preload) related cache entries for found scripts
+ # STEP 400 input: script_tags and -tmp (to avoid unfinished scripts
+ # particularly when installation fails)
+
+ cache_list = []
+
+ if not force_skip_cache and preload_cached_scripts:
+ cache_tags_without_tmp_string = '-tmp'
+ if script_tags_string != '':
+ cache_tags_without_tmp_string += ',' + script_tags_string
+ if variation_tags:
+ cache_tags_without_tmp_string += ',_' + \
+ ",_".join(variation_tags)
+ # variation_tags are prefixed with "_" but the CM search function knows only tags and so we need to change "_-" to "-_" for excluding any variations
+ # This change can later be moved to a search function specific to
+ # cache
+ cache_tags_without_tmp_string = cache_tags_without_tmp_string.replace(
+ ",_-", ",-_")
+
+ logging.debug(
+ recursion_spaces +
+ ' - Searching for cached script outputs with the following tags: {}'.format(cache_tags_without_tmp_string))
+
+ search_cache = {'action': 'find',
+ 'automation': self.meta['deps']['cache'],
+ 'tags': cache_tags_without_tmp_string}
+ rc = self.cmind.access(search_cache)
+ if rc['return'] > 0:
+ return rc
+
+ cache_list = rc['list']
+
+ logging.debug(
+ recursion_spaces +
+ ' - Number of cached script outputs found: {}'.format(
+ len(cache_list)))
+
+ # STEP 400 output: cache_list
+
+ # STEP 500: At this stage we have cache_list related to either 1 or more scripts (in case of get,compiler)
+ # If more than 1: Check if in cache and reuse it or ask user to select
+ # STEP 500 input: list_of_found_scripts
+
+ if len(list_of_found_scripts) > 0:
+ # If only tags are used, check whether there are cached scripts with these tags - then we will reuse them
+ # The use case: cm run script --tags=get,compiler
+ # Without this check, CM would always ask to select gcc,llvm,etc. even if
+ # one of them is already cached
+ if len(cache_list) > 0:
+ new_list_of_found_scripts = []
+
+ for cache_entry in cache_list:
+ # Find associated script and add to the
+ # list_of_found_scripts
+ associated_script_artifact = cache_entry.meta['associated_script_artifact']
+
+ x = associated_script_artifact.find(',')
+ if x < 0:
+ return {'return': 1, 'error': 'CM artifact format is wrong "{}" - no comma found'.format(
+ associated_script_artifact)}
+
+ associated_script_artifact_uid = associated_script_artifact[x + 1:]
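+ # e.g. a (hypothetical) value 'app-image-classification-onnx-py,3d5e908e472b417e'
+ # yields the UID part '3d5e908e472b417e' after the comma.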
+
+ cache_entry.meta['associated_script_artifact_uid'] = associated_script_artifact_uid
+
+ for script in list_of_found_scripts:
+ script_uid = script.meta['uid']
+
+ if associated_script_artifact_uid == script_uid:
+ if script not in new_list_of_found_scripts:
+ new_list_of_found_scripts.append(script)
+
+ # Avoid case when all scripts are pruned due to just 1
+ # variation used
+ if len(new_list_of_found_scripts) > 0:
+ list_of_found_scripts = new_list_of_found_scripts
+
+ # Select scripts
+ if len(list_of_found_scripts) > 1:
+ select_script = select_script_artifact(
+ list_of_found_scripts,
+ 'script',
+ recursion_spaces,
+ False,
+ script_tags_string,
+ quiet,
+ verbose)
+
+ # Remember selection
+ if not skip_remembered_selections:
+ remembered_selections.append({'type': 'script',
+ 'tags': script_tags_string,
+ 'cached_script': list_of_found_scripts[select_script]})
+ else:
+ select_script = 0
+
+ # Prune cache list with the selected script
+ if len(list_of_found_scripts) > 0:
+ script_artifact_uid = list_of_found_scripts[select_script].meta['uid']
+
+ new_cache_list = []
+ for cache_entry in cache_list:
+ if cache_entry.meta['associated_script_artifact_uid'] == script_artifact_uid:
+ new_cache_list.append(cache_entry)
+
+ cache_list = new_cache_list
+
+ # Here a specific script is found and meta obtained
+ # Set some useful local variables
+ script_artifact = list_of_found_scripts[select_script]
+
+ meta = script_artifact.meta
+ path = script_artifact.path
+
+ # Check min CM version requirement
+ min_cm_version = meta.get('min_cm_version', '').strip()
+ if min_cm_version != '':
+ # Compare versions while avoiding crashes with older CM versions
+ if 'compare_versions' in dir(utils):
+ comparison = utils.compare_versions(
+ current_cm_version, min_cm_version)
+ if comparison < 0:
+ return {'return': 1, 'error': 'CM script requires CM version >= {} while current CM version is {} - please update using "pip install cmind -U"'.format(
+ min_cm_version, current_cm_version)}
+
+ # Check path to repo
+ script_repo_path = script_artifact.repo_path
+
+ script_repo_path_with_prefix = script_artifact.repo_path
+ if script_artifact.repo_meta.get('prefix', '') != '':
+ script_repo_path_with_prefix = os.path.join(
+ script_repo_path, script_artifact.repo_meta['prefix'])
+
+ env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path
+ env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix
+
+ # Check if has --help
+ if i.get('help', False):
+ return utils.call_internal_module(self, __file__, 'module_help', 'print_help', {
+ 'meta': meta, 'path': path})
+
+ run_state['script_id'] = meta['alias'] + "," + meta['uid']
+ run_state['script_tags'] = script_tags
+ run_state['script_variation_tags'] = variation_tags
+ run_state['script_repo_alias'] = script_artifact.repo_meta.get(
+ 'alias', '')
+ run_state['script_repo_git'] = script_artifact.repo_meta.get(
+ 'git', False)
+
+ if not recursion:
+ run_state['script_entry_repo_to_report_errors'] = meta.get(
+ 'repo_to_report_errors', '')
+ run_state['script_entry_repo_alias'] = script_artifact.repo_meta.get(
+ 'alias', '')
+ run_state['script_entry_repo_git'] = script_artifact.repo_meta.get(
+ 'git', False)
+
+ deps = meta.get('deps', [])
+ post_deps = meta.get('post_deps', [])
+ prehook_deps = meta.get('prehook_deps', [])
+ posthook_deps = meta.get('posthook_deps', [])
+ input_mapping = meta.get('input_mapping', {})
+ docker_settings = meta.get('docker')
+ docker_input_mapping = {}
+ if docker_settings:
+ docker_input_mapping = docker_settings.get(
+ 'docker_input_mapping', {})
+ new_env_keys_from_meta = meta.get('new_env_keys', [])
+ new_state_keys_from_meta = meta.get('new_state_keys', [])
+
+ found_script_artifact = utils.assemble_cm_object(
+ meta['alias'], meta['uid'])
+
+ found_script_tags = meta.get('tags', [])
+
+ if i.get('debug_script', False):
+ debug_script_tags = ','.join(found_script_tags)
+
+ logging.debug(recursion_spaces +
+ ' - Found script::{} in {}'.format(found_script_artifact, path))
+
+ # STEP 500 output: script_artifact - unique selected script artifact
+ # (cache_list) pruned for the unique script if cache is used
+ # meta - script meta
+ # path - script path
+ # found_script_tags [] - all tags of the found script
+
+ # HERE WE HAVE ORIGINAL ENV
+
+ # STEP 600: Continue updating env
+ # Add default env from meta to new env if not empty
+ # (env NO OVERWRITE)
+ script_artifact_default_env = meta.get('default_env', {})
+ for key in script_artifact_default_env:
+ env.setdefault(key, script_artifact_default_env[key])
+
+ # Force env from meta['env'] as a CONST
+ # (env OVERWRITE)
+ script_artifact_env = meta.get('env', {})
+ env.update(script_artifact_env)
+
+ script_artifact_state = meta.get('state', {})
+ utils.merge_dicts({'dict1': state,
+ 'dict2': script_artifact_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ # Store the default_version in run_state -> may be overridden by
+ # variations
+ default_version = meta.get(
+ 'default_version',
+ '') # not used if version is given
+ run_state['default_version'] = default_version
+
+ # STEP 700: Overwrite env with keys from the script input (to allow user friendly CLI)
+ # IT HAS THE PRIORITY OVER meta['default_env'] and meta['env'] but not over the meta from versions/variations
+ # (env OVERWRITE - user enforces it from CLI)
+ # (it becomes const)
+ if input_mapping:
+ update_env_from_input_mapping(env, i, input_mapping)
+ update_env_from_input_mapping(const, i, input_mapping)
+
+ # This mapping is done in module_misc
+ # if docker_input_mapping:
+ # update_env_from_input_mapping(env, i, docker_input_mapping)
+ # update_env_from_input_mapping(const, i, docker_input_mapping)
+
+ # Update env/state with const
+ env.update(const)
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ # STEP 800: Process variations and update env (overwrite from env and update from default_env)
+ # VARIATIONS HAVE PRIORITY OVER the base script meta env
+ # MULTIPLE VARIATIONS (THAT CAN BE TURNED ON AT THE SAME TIME) SHOULD
+ # NOT HAVE CONFLICTING ENV
+
+ # VARIATIONS OVERWRITE the current ENV but not input keys (they become
+ # const)
+
+ variations = script_artifact.meta.get('variations', {})
+ state['docker'] = meta.get('docker', {})
+
+ r = self._update_state_from_variations(
+ i,
+ meta,
+ variation_tags,
+ variations,
+ env,
+ state,
+ const,
+ const_state,
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ new_env_keys_from_meta,
+ new_state_keys_from_meta,
+ add_deps_recursive,
+ run_state,
+ recursion_spaces,
+ verbose)
+ if r['return'] > 0:
+ return r
+
+ warnings = meta.get('warnings', [])
+ if len(r.get('warnings', [])) > 0:
+ warnings += r['warnings']
+
+ variation_tags_string = r['variation_tags_string']
+ explicit_variation_tags = r['explicit_variation_tags']
+
+ # USE CASE:
+ # HERE we may have versions in script input and env['CM_VERSION_*']
+
+ # STEP 900: Get version, min, max, usable from env (priority if passed from another script to force version),
+ # then script input, then script meta
+
+ # VERSIONS SHOULD NOT BE USED INSIDE VARIATIONS (in meta)!
+
+ # First, take version from input
+ version = i.get('version', '').strip()
+ version_min = i.get('version_min', '').strip()
+ version_max = i.get('version_max', '').strip()
+ version_max_usable = i.get('version_max_usable', '').strip()
+
+ # Second, take from env
+ if version == '':
+ version = env.get('CM_VERSION', '')
+ if version_min == '':
+ version_min = env.get('CM_VERSION_MIN', '')
+ if version_max == '':
+ version_max = env.get('CM_VERSION_MAX', '')
+ if version_max_usable == '':
+ version_max_usable = env.get(
+ 'CM_VERSION_MAX_USABLE', '')
+
+ # Third, take from meta
+ if version == '':
+ version = meta.get('version', '')
+ if version_min == '':
+ version_min = meta.get('version_min', '')
+ if version_max == '':
+ version_max = meta.get('version_max', '')
+ if version_max_usable == '':
+ version_max_usable = meta.get(
+ 'version_max_usable', '')
+
+ # Update env with resolved versions
+ notes = []
+ for version_index in [(version, 'CM_VERSION', ' == {}'),
+ (version_min, 'CM_VERSION_MIN', ' >= {}'),
+ (version_max, 'CM_VERSION_MAX', ' <= {}'),
+ (version_max_usable, 'CM_VERSION_MAX_USABLE', '({})')]:
+ version_value = version_index[0]
+ key = version_index[1]
+ note = version_index[2]
+
+ if version_value != '':
+ env[key] = version_value
+
+ notes.append(note.format(version_value))
+# elif key in env:
+# # If version_X is "", remove related key from ENV ...
+# del(env[key])
+
+ if len(notes) > 0:
+ logging.debug(
+ recursion_spaces +
+ ' - Requested version: ' +
+ ' '.join(notes))
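+ # Example debug line (illustrative versions):
+ # " - Requested version: >= 3.8 <= 3.11"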
+
+ # STEP 900 output: version* set
+ # env['CM_VERSION*] set
+
+ # STEP 1000: Update version only if in "versions" (not obligatory)
+ # can be useful when handling complex Git revisions
+ versions = script_artifact.meta.get('versions', {})
+
+ if version != '' and version in versions:
+ versions_meta = versions[version]
+ r = update_state_from_meta(
+ versions_meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ new_env_keys_from_meta,
+ new_state_keys_from_meta,
+ run_state,
+ i)
+ if r['return'] > 0:
+ return r
+ adr = get_adr(versions_meta)
+ if adr:
+ self._merge_dicts_with_tags(add_deps_recursive, adr)
+ # Processing them again using updated deps for
+ # add_deps_recursive
+ r = update_adr_from_meta(
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ add_deps_recursive,
+ env)
+ if r['return'] > 0:
+ return r
+
+ # STEP 1100: Update deps from input
+ r = update_deps_from_input(
+ deps, post_deps, prehook_deps, posthook_deps, i)
+ if r['return'] > 0:
+ return r
+
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ if str(env.get('CM_RUN_STATE_DOCKER', False)
+ ).lower() in ['true', '1', 'yes']:
+ if state.get('docker'):
+ if str(state['docker'].get('run', True)
+ ).lower() in ['false', '0', 'no']:
+ logging.info(
+ recursion_spaces +
+ ' - Skipping script::{} run as we are inside docker'.format(found_script_artifact))
+
+ # restore env and state
+ for k in list(env.keys()):
+ del (env[k])
+ for k in list(state.keys()):
+ del (state[k])
+
+ env.update(saved_env)
+ state.update(saved_state)
+
+ rr = {
+ 'return': 0,
+ 'env': env,
+ 'new_env': {},
+ 'state': state,
+ 'new_state': {},
+ 'deps': []}
+ return rr
+
+ elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']:
+ logging.info(
+ recursion_spaces +
+ ' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact))
+ fake_run = True
+ env['CM_TMP_FAKE_RUN'] = 'yes'
+
+ #######################################################################
+ # Check extra cache tags
+ x = env.get('CM_EXTRA_CACHE_TAGS', '').strip()
+ extra_cache_tags = [] if x == '' else x.split(',')
+
+ if i.get('extra_cache_tags', '') != '':
+ for x in i['extra_cache_tags'].strip().split(','):
+ if x != '':
+ if '<<<' in x:
+ import re
+ tmp_values = re.findall(r'<<<(.*?)>>>', str(x))
+ for tmp_value in tmp_values:
+ xx = str(env.get(tmp_value, ''))
+ x = x.replace("<<<" + tmp_value + ">>>", xx)
+ if x not in extra_cache_tags:
+ extra_cache_tags.append(x)
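+ # Illustrative input: --extra_cache_tags=model-<<<CM_ML_MODEL_NAME>>>
+ # expands to "model-resnet50" if env['CM_ML_MODEL_NAME'] == 'resnet50'
+ # (the env key is an assumed example).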
+
+ if env.get('CM_NAME', '') != '':
+ extra_cache_tags.append('name-' + env['CM_NAME'].strip().lower())
+
+ #######################################################################
+ # Check if need to clean output files
+ clean_output_files = meta.get('clean_output_files', [])
+
+ if len(clean_output_files) > 0:
+ clean_tmp_files(clean_output_files, recursion_spaces)
+
+ #######################################################################
+ # Check if the output of a selected script should be cached
+ cache = False if i.get('skip_cache', False) else meta.get('cache', False)
+ cache = cache or (i.get('force_cache', False)
+ and meta.get('can_force_cache', False))
+ # fake run skips run script - should not pollute cache
+ cache = False if fake_run else cache
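+ # Net effect (summary): caching is on when meta['cache'] is True, or when
+ # --force_cache is given and the script allows can_force_cache; it is off
+ # for skip_cache and fake runs.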
+
+ cached_uid = ''
+ cached_tags = []
+ cached_meta = {}
+
+ remove_tmp_tag = False
+ reuse_cached = False
+
+ found_cached = False
+ cached_path = ''
+
+ local_env_keys_from_meta = meta.get('local_env_keys', [])
+
+ # Check if has customize.py
+ path_to_customize_py = os.path.join(path, 'customize.py')
+ customize_code = None
+ customize_common_input = {}
+
+ if os.path.isfile(path_to_customize_py) and cache:
+ r = utils.load_python_module(
+ {'path': path, 'name': 'customize'})
+ if r['return'] > 0:
+ return r
+
+ customize_code = r['code']
+
+ customize_common_input = {
+ 'input': i,
+ 'automation': self,
+ 'artifact': script_artifact,
+ 'customize': script_artifact.meta.get('customize', {}),
+ 'os_info': os_info,
+ 'recursion_spaces': recursion_spaces,
+ 'script_tags': script_tags,
+ 'variation_tags': variation_tags
+ }
+
+ #######################################################################
+ # Check if script is cached if we need to skip deps from cached entries
+ this_script_cached = False
+
+ #######################################################################
+ # Check if the output of a selected script should be cached
+ if cache:
+ # TBD - need to reuse and prune cache_list instead of a new CM
+ # search inside find_cached_script
+
+ r = find_cached_script({'self': self,
+ 'recursion_spaces': recursion_spaces,
+ 'extra_recursion_spaces': extra_recursion_spaces,
+ 'add_deps_recursive': add_deps_recursive,
+ 'script_tags': script_tags,
+ 'found_script_tags': found_script_tags,
+ 'found_script_path': path,
+ 'customize_code': customize_code,
+ 'customize_common_input': customize_common_input,
+ 'variation_tags': variation_tags,
+ 'variation_tags_string': variation_tags_string,
+ 'explicit_variation_tags': explicit_variation_tags,
+ 'version': version,
+ 'version_min': version_min,
+ 'version_max': version_max,
+ 'extra_cache_tags': extra_cache_tags,
+ 'new_cache_entry': new_cache_entry,
+ 'meta': meta,
+ 'env': env,
+ 'state': state,
+ 'const': const,
+ 'const_state': const_state,
+ 'skip_remembered_selections': skip_remembered_selections,
+ 'remembered_selections': remembered_selections,
+ 'quiet': quiet,
+ 'verbose': verbose,
+ 'show_time': show_time
+ })
+ if r['return'] > 0:
+ return r
+
+ # Sort by tags to ensure determinism in order (and later add
+ # versions)
+ found_cached_scripts = sorted(
+ r['found_cached_scripts'],
+ key=lambda x: sorted(
+ x.meta['tags']))
+
+ cached_tags = r['cached_tags']
+ search_tags = r['search_tags']
+
+ num_found_cached_scripts = len(found_cached_scripts)
+
+ if num_found_cached_scripts > 0:
+ selection = 0
+
+ # Check if quiet mode
+ if num_found_cached_scripts > 1:
+ if quiet:
+ num_found_cached_scripts = 1
+
+ if num_found_cached_scripts > 1:
+ selection = select_script_artifact(
+ found_cached_scripts,
+ 'cached script output',
+ recursion_spaces,
+ True,
+ script_tags_string,
+ quiet,
+ verbose)
+
+ if selection >= 0:
+ if not skip_remembered_selections:
+ # Remember selection
+ remembered_selections.append({'type': 'cache',
+ 'tags': search_tags,
+ 'cached_script': found_cached_scripts[selection]})
+ else:
+ num_found_cached_scripts = 0
+
+ elif num_found_cached_scripts == 1:
+ logging.debug(
+ recursion_spaces +
+ ' - Found cached script output: {}'.format(
+ found_cached_scripts[0].path))
+
+ if num_found_cached_scripts > 0:
+ found_cached = True
+
+ # Check chain of dynamic dependencies on other CM scripts
+ if len(deps) > 0:
+ logging.debug(
+ recursion_spaces +
+ ' - Checking dynamic dependencies on other CM scripts:')
+
+ r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, True, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ logging.debug(
+ recursion_spaces +
+ ' - Processing env after dependencies ...')
+
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ # Check chain of prehook dependencies on other CM scripts.
+ # (No execution of customize.py for cached scripts)
+ logging.debug(
+ recursion_spaces +
+ ' - Checking prehook dependencies on other CM scripts:')
+
+ r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ # Continue with the selected cached script
+ cached_script = found_cached_scripts[selection]
+
+ logging.debug(
+ recursion_spaces +
+ ' - Loading state from cached entry ...')
+
+ path_to_cached_state_file = os.path.join(cached_script.path,
+ self.file_with_cached_state)
+
+ r = utils.load_json(file_name=path_to_cached_state_file)
+ if r['return'] > 0:
+ return r
+ version = r['meta'].get('version')
+
+ if not run_state.get('tmp_silent', False):
+ logging.info(
+ recursion_spaces +
+ ' ! load {}'.format(path_to_cached_state_file))
+
+ ###########################################################
+ # IF REUSE FROM CACHE - update env and state from cache!
+ cached_state = r['meta']
+
+ r = self._fix_cache_paths(cached_state['new_env'])
+ if r['return'] > 0:
+ return r
+ new_env = r['new_env']
+
+ utils.merge_dicts(
+ {'dict1': env, 'dict2': new_env, 'append_lists': True, 'append_unique': True})
+
+ new_state = cached_state['new_state']
+ utils.merge_dicts({'dict1': state,
+ 'dict2': new_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ utils.merge_dicts(
+ {'dict1': new_env, 'dict2': const, 'append_lists': True, 'append_unique': True})
+ utils.merge_dicts({'dict1': new_state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ if not fake_run:
+ # Check the chain of posthook dependencies on other CM scripts. We consider them
+ # the same as post deps when the script is in cache
+ logging.debug(
+ recursion_spaces +
+ ' - Checking posthook dependencies on other CM scripts:')
+
+ clean_env_keys_post_deps = meta.get(
+ 'clean_env_keys_post_deps', [])
+
+ r = self._call_run_deps(posthook_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ logging.debug(
+ recursion_spaces +
+ ' - Checking post dependencies on other CM scripts:')
+
+ # Check chain of post dependencies on other CM scripts
+ r = self._call_run_deps(post_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ if renew or (not found_cached and num_found_cached_scripts == 0):
+ # Add more tags to cached tags
+ # based on meta information of the found script
+ x = 'script-artifact-' + meta['uid']
+ if x not in cached_tags:
+ cached_tags.append(x)
+
+ # Add all tags from the original CM script
+ for x in meta.get('tags', []):
+ if x not in cached_tags:
+ cached_tags.append(x)
+
+ if not found_cached and num_found_cached_scripts == 0:
+ if i.get('only_execute_from_cache'):
+ # useful to check valid cache entries for a script (cm show
+ # cache can return invalid cache entries for a script too)
+ return {
+ 'return': 1, 'error': f'No valid cache entry found for {cached_tags}'}
+
+ # If not cached, create cached script artifact and mark as tmp
+ # (remove if cache successful)
+ tmp_tags = ['tmp']
+
+ # Finalize tmp tags
+ tmp_tags += [t for t in cached_tags if not t.startswith("-")]
+
+ # Check if some variations are missing
+ # though it should not happen!
+ for t in variation_tags:
+ if t.startswith("-"):
+ continue
+ x = '_' + t
+ if x not in tmp_tags:
+ tmp_tags.append(x)
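+ # Illustrative result (assumed tags/UID): tmp_tags =
+ # ['tmp', 'script-artifact-3d5e908e472b417e', 'get', 'compiler', '_llvm']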
+
+ # Use update to update the tmp one if already exists
+ logging.debug(
+ recursion_spaces +
+ ' - Creating new "cache" script artifact in the CM local repository ...')
+ logging.debug(recursion_spaces +
+ ' - Tags: {}'.format(','.join(tmp_tags)))
+
+ if version != '':
+ cached_meta['version'] = version
+
+ ii = {'action': 'update',
+ 'automation': self.meta['deps']['cache'],
+ 'search_tags': tmp_tags,
+ 'tags': ','.join(tmp_tags),
+ 'meta': cached_meta,
+ 'force': True}
+
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ remove_tmp_tag = True
+
+ cached_script = r['list'][0]
+
+ cached_path = cached_script.path
+ cached_meta = cached_script.meta
+
+ cached_uid = cached_meta['uid']
+
+ # Changing path to CM script artifact for cached output
+ # to record data and files there
+ logging.debug(
+ recursion_spaces +
+ ' - Changing to {}'.format(cached_path))
+
+ os.chdir(cached_path)
+
+ # If found cached and we want to renew it
+ if found_cached and renew:
+ cached_path = cached_script.path
+ cached_meta = cached_script.meta
+
+ cached_uid = cached_meta['uid']
+
+ # Changing path to CM script artifact for cached output
+ # to record data and files there
+ logging.debug(
+ recursion_spaces +
+ ' - Changing to {}'.format(cached_path))
+
+ os.chdir(cached_path)
+
+ # Force to finalize script inside cached entry
+ found_cached = False
+ remove_tmp_tag = True
+
+ env['CM_RENEW_CACHE_ENTRY'] = 'yes'
+
+ # Prepare files to be cleaned
+ clean_files = [self.tmp_file_run_state,
+ self.tmp_file_run_env,
+ self.tmp_file_ver,
+ self.tmp_file_env + bat_ext,
+ self.tmp_file_env_all + bat_ext,
+ self.tmp_file_state,
+ self.tmp_file_run + bat_ext]
+
+ if not found_cached and len(meta.get('clean_files', [])) > 0:
+ clean_files = meta['clean_files'] + clean_files
+
+ ################################
+ if not found_cached:
+ if len(warnings) > 0:
+ logging.warning(
+ '=================================================')
+ logging.warning('WARNINGS:')
+ for w in warnings:
+ logging.warning(' ' + w)
+ logging.warning(
+ '=================================================')
+
+ # Update default version meta if version is not set
+ if version == '':
+ default_version = run_state.get('default_version', '')
+ if default_version != '':
+ version = default_version
+
+ if version_min != '':
+ ry = self.cmind.access({'action': 'compare_versions',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'version1': version,
+ 'version2': version_min})
+ if ry['return'] > 0:
+ return ry
+
+ if ry['comparison'] < 0:
+ version = version_min
+
+ if version_max != '':
+ ry = self.cmind.access({'action': 'compare_versions',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'version1': version,
+ 'version2': version_max})
+ if ry['return'] > 0:
+ return ry
+
+ if ry['comparison'] > 0:
+ if version_max_usable != '':
+ version = version_max_usable
+ else:
+ version = version_max
+
+ logging.debug(
+ recursion_spaces +
+ ' - Version was not specified - using either default_version from meta or min/max/usable: {}'.format(version))
+
+ r = _update_env(env, 'CM_VERSION', version)
+ if r['return'] > 0:
+ return r
+
+ if 'version-' + version not in cached_tags:
+ cached_tags.append('version-' + version)
+
+ if default_version in versions:
+ versions_meta = versions[default_version]
+ r = update_state_from_meta(
+ versions_meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ new_env_keys_from_meta,
+ new_state_keys_from_meta,
+ run_state,
+ i)
+ if r['return'] > 0:
+ return r
+
+ if "add_deps_recursive" in versions_meta:
+ self._merge_dicts_with_tags(
+ add_deps_recursive, versions_meta['add_deps_recursive'])
+
+ r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path)
+ if r['return'] > 0:
+ return r
+
+ # Run chain of docker dependencies if current run cmd is from
+ # inside a docker container
+ docker_deps = []
+ if i.get('docker_run_deps'):
+ docker_meta = meta.get('docker')
+ if docker_meta:
+ docker_deps = docker_meta.get('deps', [])
+ if docker_deps:
+ docker_deps = [
+ dep for dep in docker_deps if not dep.get(
+ 'skip_inside_docker', False)]
+
+ if len(docker_deps) > 0:
+
+ logging.debug(
+ recursion_spaces +
+ ' - Checking docker run dependencies on other CM scripts:')
+
+ r = self._call_run_deps(docker_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ logging.debug(
+ recursion_spaces +
+ ' - Processing env after docker run dependencies ...')
+
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ # Check chain of dependencies on other CM scripts
+ if len(deps) > 0:
+ logging.debug(recursion_spaces +
+ ' - Checking dependencies on other CM scripts:')
+
+ r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ logging.debug(recursion_spaces +
+ ' - Processing env after dependencies ...')
+
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ # Clean some output files
+ clean_tmp_files(clean_files, recursion_spaces)
+
+ # Prepare common input to prepare and run script
+ run_script_input = {
+ 'path': path,
+ 'bat_ext': bat_ext,
+ 'os_info': os_info,
+ 'const': const,
+ 'state': state,
+ 'const_state': const_state,
+ 'reuse_cached': reuse_cached,
+ 'recursion': recursion,
+ 'recursion_spaces': recursion_spaces,
+ 'remembered_selections': remembered_selections,
+ 'tmp_file_run_state': self.tmp_file_run_state,
+ 'tmp_file_run_env': self.tmp_file_run_env,
+ 'tmp_file_state': self.tmp_file_state,
+ 'tmp_file_run': self.tmp_file_run,
+ 'local_env_keys': self.local_env_keys,
+ 'local_env_keys_from_meta': local_env_keys_from_meta,
+ 'posthook_deps': posthook_deps,
+ 'add_deps_recursive': add_deps_recursive,
+ 'remembered_selections': remembered_selections,
+ 'found_script_tags': found_script_tags,
+ 'variation_tags_string': variation_tags_string,
+ 'found_cached': False,
+ 'debug_script_tags': debug_script_tags,
+ 'verbose': verbose,
+ 'meta': meta,
+ 'self': self
+ }
+ if os.path.isfile(
+ path_to_customize_py): # possible duplicate execution - needs fix
+ r = utils.load_python_module(
+ {'path': path, 'name': 'customize'})
+ if r['return'] > 0:
+ return r
+
+ customize_code = r['code']
+
+ customize_common_input = {
+ 'input': i,
+ 'automation': self,
+ 'artifact': script_artifact,
+ 'customize': script_artifact.meta.get('customize', {}),
+ 'os_info': os_info,
+ 'recursion_spaces': recursion_spaces,
+ 'script_tags': script_tags,
+ 'variation_tags': variation_tags
+ }
+ run_script_input['customize_code'] = customize_code
+ run_script_input['customize_common_input'] = customize_common_input
+
+ if repro_prefix != '':
+ run_script_input['repro_prefix'] = repro_prefix
+ if ignore_script_error:
+ run_script_input['ignore_script_error'] = True
+
+ # Assemble PIP versions
+ pip_version_string = ''
+
+ pip_version = env.get('CM_VERSION', '')
+ pip_version_min = env.get('CM_VERSION_MIN', '')
+ pip_version_max = env.get('CM_VERSION_MAX', '')
+
+ if pip_version != '':
+ pip_version_string = '==' + pip_version
+ elif pip_version_min != '' and pip_version_max != '':
+ pip_version_string = '>=' + pip_version_min + ',<=' + pip_version_max
+ elif pip_version_min != '':
+ pip_version_string = '>=' + pip_version_min
+ elif pip_version_max != '':
+ pip_version_string = '<=' + pip_version_max
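+ # Illustrative mapping: CM_VERSION='1.2.3' -> '==1.2.3';
+ # CM_VERSION_MIN='1.2' with CM_VERSION_MAX='1.9' -> '>=1.2,<=1.9'.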
+
+ env.update(const)
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ r = _update_env(
+ env,
+ 'CM_TMP_PIP_VERSION_STRING',
+ pip_version_string)
+ if r['return'] > 0:
+ return r
+
+ if pip_version_string != '':
+ logging.debug(
+ recursion_spaces +
+ ' # potential PIP version string (if needed): ' +
+ pip_version_string)
+
+ tmp_curdir = os.getcwd()
+ if env.get('CM_OUTDIRNAME', '') != '':
+ if not os.path.exists(env['CM_OUTDIRNAME']):
+ os.makedirs(env['CM_OUTDIRNAME'])
+ os.chdir(env['CM_OUTDIRNAME'])
+
+ # Check if pre-process and detect
+ if 'preprocess' in dir(customize_code) and not fake_run:
+
+ logging.debug(recursion_spaces + ' - Running preprocess ...')
+
+ run_script_input['run_state'] = run_state
+
+ ii = copy.deepcopy(customize_common_input)
+ ii['env'] = env
+ ii['state'] = state
+ ii['meta'] = meta
+ # may need to detect versions in multiple paths
+ ii['run_script_input'] = run_script_input
+
+ r = customize_code.preprocess(ii)
+ if r['return'] > 0:
+ return r
+
+ # Check if preprocess says to skip this component
+ skip = r.get('skip', False)
+
+ if skip:
+ logging.debug(
+ recursion_spaces +
+ ' - this script is skipped!')
+
+ # Check if script asks to run other dependencies instead of
+ # the skipped one
+ another_script = r.get('script', {})
+
+ if len(another_script) == 0:
+ return {'return': 0, 'skipped': True}
+
+ logging.debug(
+ recursion_spaces +
+ ' - another script is executed instead!')
+
+ ii = {
+ 'action': 'run',
+ 'automation': utils.assemble_cm_object(self.meta['alias'], self.meta['uid']),
+ 'recursion_spaces': recursion_spaces + extra_recursion_spaces,
+ 'recursion': True,
+ 'remembered_selections': remembered_selections,
+ 'env': env,
+ 'state': state,
+ 'const': const,
+ 'const_state': const_state,
+ 'save_env': save_env,
+ 'add_deps_recursive': add_deps_recursive
+ }
+
+ ii.update(another_script)
+
+ # Return to current path
+ os.chdir(current_path)
+
+ ###########################################################
+ return self.cmind.access(ii)
+
+ # If preprocess returned a version
+ if cache:
+ if r.get('version', '') != '':
+ cached_tags = [
+ x for x in cached_tags if not x.startswith('version-')]
+ cached_tags.append('version-' + r['version'])
+
+ if len(r.get('add_extra_cache_tags', [])) > 0:
+ for t in r['add_extra_cache_tags']:
+ if t not in cached_tags:
+ cached_tags.append(t)
+
+ if print_env:
+ import json
+ logging.debug(json.dumps(env, indent=2, sort_keys=True))
+
+ # Check chain of pre hook dependencies on other CM scripts
+ if len(prehook_deps) > 0:
+ logging.debug(
+ recursion_spaces +
+ ' - Checking prehook dependencies on other CM scripts:')
+
+ r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ if not fake_run:
+ env_key_mappings = meta.get("env_key_mappings", {})
+ if env_key_mappings:
+ update_env_keys(env, env_key_mappings)
+
+ run_script_input['meta'] = meta
+ run_script_input['env'] = env
+ run_script_input['run_state'] = run_state
+ run_script_input['recursion'] = recursion
+
+ r = prepare_and_run_script_with_postprocessing(
+ run_script_input)
+ if r['return'] > 0:
+ return r
+
+ # If the script run returned a version
+ if r.get('version', '') != '':
+ version = r.get('version')
+ if cache:
+ cached_tags = [
+ x for x in cached_tags if not x.startswith('version-')]
+ cached_tags.append('version-' + r['version'])
+
+ if len(r.get('add_extra_cache_tags', [])) > 0 and cache:
+ for t in r['add_extra_cache_tags']:
+ if t not in cached_tags:
+ cached_tags.append(t)
+
+ # Check chain of post dependencies on other CM scripts
+ clean_env_keys_post_deps = meta.get(
+ 'clean_env_keys_post_deps', [])
+
+ r = self._run_deps(post_deps, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, recursion_spaces,
+ remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ # Add extra tags from env updated by deps (such as python version
+ # and compiler version, etc)
+ extra_cache_tags_from_env = meta.get(
+ 'extra_cache_tags_from_env', [])
+ for extra_cache_tags in extra_cache_tags_from_env:
+ key = extra_cache_tags['env']
+ prefix = extra_cache_tags.get('prefix', '')
+
+ v = env.get(key, '').strip()
+ if v != '':
+ for t in v.split(','):
+ x = 'deps-' + prefix + t
+ if x not in cached_tags:
+ cached_tags.append(x)
+
+ if env.get('CM_OUTDIRNAME', '') != '':
+ os.chdir(tmp_curdir)
+
+ detected_version = env.get(
+ 'CM_DETECTED_VERSION', env.get(
+ 'CM_VERSION', ''))
+ dependent_cached_path = env.get('CM_GET_DEPENDENT_CACHED_PATH', '')
+
+ #######################################################################
+ # Finalize script
+
+ # Force consts in the final new env and state
+ utils.merge_dicts({'dict1': env, 'dict2': const,
+ 'append_lists': True, 'append_unique': True})
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ if i.get('force_new_env_keys', []):
+ new_env_keys = i['force_new_env_keys']
+ else:
+ new_env_keys = new_env_keys_from_meta
+
+ if i.get('force_new_state_keys', []):
+ new_state_keys = i['force_new_state_keys']
+ else:
+ new_state_keys = new_state_keys_from_meta
+
+ r = detect_state_diff(
+ env,
+ saved_env,
+ new_env_keys,
+ new_state_keys,
+ state,
+ saved_state)
+ if r['return'] > 0:
+ return r
+
+ new_env = r['new_env']
+ new_state = r['new_state']
+
+ utils.merge_dicts({'dict1': saved_env,
+ 'dict2': new_env,
+ 'append_lists': True,
+ 'append_unique': True})
+ utils.merge_dicts({'dict1': saved_state,
+ 'dict2': new_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ # Restore original env/state and merge env/state
+ # This is needed since we want to keep original env/state outside this script
+ # If we delete env and create a new dict, the original one outside this script will be detached
+ # That's why we just clean all keys in the original env/state (used outside)
+ # And then copy saved_env (with new_env merged) and saved_state (with new_state merged)
+ # while getting rid of all temporary updates in env and state inside
+ # this script
+
+ for k in list(env.keys()):
+ del (env[k])
+ for k in list(state.keys()):
+ del (state[k])
+
+ env.update(saved_env)
+ state.update(saved_state)
+
+ # Prepare env script content (to be saved in cache and in the current
+ # path if needed)
+ env_script = convert_env_to_script(
+ new_env, os_info, start_script=os_info['start_script'])
+
+ # If using cached script artifact, return to default path and then
+ # update the cache script artifact
+ if cache and cached_path != '':
+ # Check if need to remove tag
+ if remove_tmp_tag:
+ # Save state, env and deps for reuse
+ r = utils.save_json(file_name=os.path.join(cached_path, self.file_with_cached_state),
+ meta={'new_state': new_state, 'new_env': new_env, 'deps': deps, 'version': version})
+ if r['return'] > 0:
+ return r
+
+ # Save all env
+ env_all_script = convert_env_to_script(
+ env, os_info, start_script=os_info['start_script'])
+
+ r = record_script(os.path.join(cached_path, self.tmp_file_env_all + bat_ext),
+ env_all_script, os_info)
+ if r['return'] > 0:
+ return r
+
+ # Save env
+ r = record_script(os.path.join(cached_path, self.tmp_file_env + bat_ext),
+ env_script, os_info)
+ if r['return'] > 0:
+ return r
+
+ # Remove tmp tag from the "cached" artifact to finalize caching
+ logging.debug(
+ recursion_spaces +
+ ' - Removing tmp tag in the script cached output {} ...'.format(cached_uid))
+
+ # Check if version was detected and record it in the meta
+ if detected_version != '':
+ cached_meta['version'] = detected_version
+
+ if found_script_artifact != '':
+ cached_meta['associated_script_artifact'] = found_script_artifact
+
+ x = found_script_artifact.find(',')
+ if x < 0:
+ return {
+ 'return': 1, 'error': 'CM artifact format is wrong "{}" - no comma found'.format(found_script_artifact)}
+
+ cached_meta['associated_script_artifact_uid'] = found_script_artifact[x + 1:]
+
+ # Check if the cached entry is dependent on any path
+ if dependent_cached_path != '':
+ if os.path.isdir(cached_path) and os.path.exists(
+ dependent_cached_path):
+ if not os.path.samefile(
+ cached_path, dependent_cached_path):
+ cached_meta['dependent_cached_path'] = dependent_cached_path
+
+ ii = {'action': 'update',
+ 'automation': self.meta['deps']['cache'],
+ 'artifact': cached_uid,
+ 'meta': cached_meta,
+ 'replace_lists': True, # To replace tags
+ 'tags': ','.join(cached_tags)}
+
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ # Clean tmp files only in current path (do not touch cache - we keep
+ # all info there)
+ script_path = os.getcwd()
+ os.chdir(current_path)
+
+ shell = i.get('shell', False)
+# if not shell:
+# shell = i.get('debug', False)
+
+ if not shell and not i.get('dirty', False) and not cache:
+ clean_tmp_files(clean_files, recursion_spaces)
+
+ # Record new env and new state in the current dir if needed
+ if save_env or shell:
+ # Check if script_prefix in the state from other components
+ where_to_add = len(os_info['start_script'])
+
+ script_prefix = state.get('script_prefix', [])
+ if len(script_prefix) > 0:
+ env_script.insert(where_to_add, '\n')
+ for x in reversed(script_prefix):
+ env_script.insert(where_to_add, x)
+
+ if shell:
+ x = [
+ 'cmd',
+ '.',
+ '',
+ '.bat',
+ ''] if os_info['platform'] == 'windows' else [
+ 'bash',
+ ' ""',
+ '"',
+ '.sh',
+ '. ./']
+
+ env_script.append('\n')
+ env_script.append('echo{}\n'.format(x[1]))
+ env_script.append(
+ 'echo {}Working path: {}{}'.format(
+ x[2], script_path, x[2]))
+ xtmp_run_file = ''
+ tmp_run_file = 'tmp-run{}'.format(x[3])
+ if os.path.isfile(tmp_run_file):
+ xtmp_run_file = 'Change and run "{}". '.format(
+ tmp_run_file)
+
+ env_script.append(
+ 'echo {}Running debug shell. {}Type exit to quit ...{}\n'.format(
+ x[2], xtmp_run_file, x[2]))
+ env_script.append('echo{}\n'.format(x[1]))
+ env_script.append('\n')
+ env_script.append(x[0])
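+ # Illustrative tail of the generated env script on Linux
+ # (path shown for illustration):
+ # echo ""
+ # echo "Working path: /current/dir"
+ # echo "Running debug shell. Type exit to quit ..."
+ # echo ""
+ # bash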
+
+ env_file = self.tmp_file_env + bat_ext
+
+ r = record_script(env_file, env_script, os_info)
+ if r['return'] > 0:
+ return r
+
+ if shell:
+ x = env_file if os_info['platform'] == 'windows' else '. ./' + env_file
+ os.system(x)
+
+ if not version and detected_version:
+ version = detected_version
+
+ # Add detected or forced version to the CM script run time state
+ # to aggregate all resolved versions and dump them at the end
+ # if requested (for better reproducibility/replicability)
+
+ script_uid = script_artifact.meta.get('uid')
+ script_alias = script_artifact.meta.get('alias')
+
+ # we should use user-friendly tags here
+ # script_tags = script_artifact.meta.get('tags')
+
+ version_info_tags = ",".join(script_tags)
+
+ if len(variation_tags) > 0:
+ for vt in variation_tags:
+ version_info_tags += ',_' + vt
+
+ version_info = {}
+ version_info[version_info_tags] = {
+ 'script_uid': script_uid,
+ 'script_alias': script_alias,
+ 'script_tags': ','.join(found_script_tags),
+ 'script_variations': ','.join(variation_tags),
+ 'version': version,
+ 'parent': run_state['parent']
+ }
+
+ run_state['version_info'].append(version_info)
+
+ script_versions = detected_versions.get(meta['uid'], [])
+
+ if not script_versions:
+ detected_versions[meta['uid']] = [version]
+ else:
+ script_versions.append(version)
+
+ # RETURN
+ elapsed_time = time.time() - start_time
+
+ if verbose and cached_uid != '':
+ logging.info(
+ recursion_spaces +
+ ' - cache UID: {}'.format(cached_uid))
+
+ if print_deps:
+ print_deps_data = self._print_deps(run_state['deps'])
+ new_state['print_deps'] = print_deps_data
+
+ if print_readme or repro_prefix != '':
+ readme = self._get_readme(cmd, run_state)
+
+ # Copy Docker sample
+ if repro_prefix != '' and repro_dir != '':
+ docker_template_path = os.path.join(
+ self.path, 'docker_repro_example')
+ if os.path.isdir(docker_template_path):
+ try:
+ shutil.copytree(
+ docker_template_path,
+ repro_dir,
+ dirs_exist_ok=True)
+ except Exception:
+ pass
+
+ docker_container = self._get_docker_container(cmd, run_state)
+
+ try:
+ with open(os.path.join(repro_dir, 'ubuntu-23.04.Dockerfile'), 'a+') as f:
+ f.write(docker_container)
+ except Exception:
+ pass
+
+ if print_readme:
+ with open('README-cm.md', 'w') as f:
+ f.write(readme)
+
+ if dump_version_info:
+ r = self._dump_version_info_for_script(quiet=quiet, silent=silent)
+ if r['return'] > 0:
+ return r
+
+ rr = {
+ 'return': 0,
+ 'env': env,
+ 'new_env': new_env,
+ 'state': state,
+ 'new_state': new_state,
+ 'deps': run_state.get('deps')}
+
+ # Print output as json to console
+ if i.get('json', False) or i.get('j', False):
+ import json
+ logging.info(json.dumps(rr, indent=2))
+
+ # Check if save json to file
+ if repro_prefix != '':
+
+ with open(repro_prefix + '-README-cm.md', 'w', encoding='utf-8') as f:
+ f.write(readme)
+
+ dump_repro(repro_prefix, rr, run_state)
+
+ if verbose or show_time:
+ logging.info(
+ recursion_spaces +
+ ' - running time of script "{}": {:.2f} sec.'.format(
+ ','.join(found_script_tags),
+ elapsed_time))
+
+ if not recursion and show_space:
+ stop_disk_stats = shutil.disk_usage("/")
+
+ used_disk_space_in_mb = int(
+ (start_disk_stats.free - stop_disk_stats.free) / (1024 * 1024))
+
+ if used_disk_space_in_mb > 0:
+ logging.info(
+ recursion_spaces +
+ ' - used disk space: {} MB'.format(used_disk_space_in_mb))
+
+ # Check if need to print some final info such as path to model, etc
+ if not run_state.get('tmp_silent', False):
+ print_env_at_the_end = meta.get('print_env_at_the_end', {})
+ if len(print_env_at_the_end) > 0:
+ for p in sorted(print_env_at_the_end):
+ t = print_env_at_the_end[p]
+ if t == '':
+ t = 'ENV[{}]'.format(p)
+
+ v = new_env.get(p, None)
+
+ logging.info('{}: {}'.format(t, str(v)))
+
+ # Check if print nice versions
+ if print_versions:
+ self._print_versions(run_state)
+
+ # Check if pause (useful if running a given script in a new terminal
+ # that may close automatically)
+ if i.get('pause', False):
+ input('Press Enter to continue ...')
+
+ return rr
+
+ ##########################################################################
+ def _fix_cache_paths(self, env):
+ cm_repos_path = os.environ.get(
+ 'CM_REPOS', os.path.join(
+ os.path.expanduser("~"), "CM", "repos"))
+ current_cache_path = os.path.realpath(
+ os.path.join(cm_repos_path, "local", "cache"))
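+ # e.g. '/home/user/CM/repos/local/cache' (illustrative home directory)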
+
+ new_env = env # just a reference
+
+ for key, val in new_env.items():
+ # Check for a path separator in a string and determine the
+ # separator
+ if isinstance(val, str) and any(sep in val for sep in [
+ "/local/cache/", "\\local\\cache\\"]):
+ sep = "/" if "/local/cache/" in val else "\\"
+
+ path_split = val.split(sep)
+ repo_entry_index = path_split.index("local")
+ loaded_cache_path = sep.join(
+ path_split[0:repo_entry_index + 2])
+ if loaded_cache_path != current_cache_path and os.path.exists(
+ current_cache_path):
+ new_env[key] = val.replace(
+ loaded_cache_path, current_cache_path)
+
+ elif isinstance(val, list):
+ for i, val2 in enumerate(val):
+ if isinstance(val2, str) and any(sep in val2 for sep in [
+ "/local/cache/", "\\local\\cache\\"]):
+ sep = "/" if "/local/cache/" in val2 else "\\"
+
+ path_split = val2.split(sep)
+ repo_entry_index = path_split.index("local")
+ loaded_cache_path = sep.join(
+ path_split[0:repo_entry_index + 2])
+ if loaded_cache_path != current_cache_path and os.path.exists(
+ current_cache_path):
+ new_env[key][i] = val2.replace(
+ loaded_cache_path, current_cache_path)
+
+ return {'return': 0, 'new_env': new_env}
+
+ ##########################################################################
+ def _dump_version_info_for_script(
+ self, output_dir=os.getcwd(), quiet=False, silent=False):
+
+ for f in ['cm-run-script-versions.json', 'version_info.json']:
+ if not quiet and not silent:
+ logging.info('Dumping versions to {}'.format(f))
+ r = utils.save_json(f, self.run_state.get('version_info', []))
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def _update_state_from_variations(self, i, meta, variation_tags, variations, env, state, const, const_state, deps, post_deps, prehook_deps,
+ posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, add_deps_recursive, run_state, recursion_spaces, verbose):
+
+ # Save current explicit variations
+ import copy
+ explicit_variation_tags = copy.deepcopy(variation_tags)
+
+ # Calculate space
+ required_disk_space = {}
+
+ # Check if warning
+ warnings = []
+
+ # variation_tags get appended by any aliases
+ r = self._get_variations_with_aliases(variation_tags, variations)
+ if r['return'] > 0:
+ return r
+ variation_tags = r['variation_tags']
+ excluded_variation_tags = r['excluded_variation_tags']
+
+ # Get a dictionary of variation groups
+ r = self._get_variation_groups(variations)
+ if r['return'] > 0:
+ return r
+
+ variation_groups = r['variation_groups']
+
+ run_state['variation_groups'] = variation_groups
+
+ # Add variation(s) if specified in the "tags" input prefixed by _
+
+ # If there is only 1 default variation, then just use it or
+ # substitute from CMD
+
+ default_variation = meta.get('default_variation', '')
+
+ if default_variation and default_variation not in variations:
+ return {'return': 1, 'error': 'Default variation "{}" is not in the list of variations: "{}" '.format(
+ default_variation, variations.keys())}
+
+ if len(variation_tags) == 0:
+ if default_variation != '' and default_variation not in excluded_variation_tags:
+ variation_tags = [default_variation]
+
+ r = self._update_variation_tags_from_variations(
+ variation_tags, variations, variation_groups, excluded_variation_tags)
+ if r['return'] > 0:
+ return r
+
+ # variation_tags get appended by any default on variation in groups
+ r = self._process_variation_tags_in_groups(
+ variation_tags, variation_groups, excluded_variation_tags, variations)
+ if r['return'] > 0:
+ return r
+ if variation_tags != r['variation_tags']:
+ variation_tags = r['variation_tags']
+
+ # we need to again process variation tags if any new default
+ # variation is added
+ r = self._update_variation_tags_from_variations(
+ variation_tags, variations, variation_groups, excluded_variation_tags)
+ if r['return'] > 0:
+ return r
+
+ valid_variation_combinations = meta.get(
+ 'valid_variation_combinations', [])
+ if valid_variation_combinations:
+ if not any(all(t in variation_tags for t in s)
+ for s in valid_variation_combinations):
+ return {'return': 1, 'error': 'Invalid variation combination "{}" prepared. Valid combinations: "{}" '.format(
+ variation_tags, valid_variation_combinations)}
+
+ invalid_variation_combinations = meta.get(
+ 'invalid_variation_combinations', [])
+ if invalid_variation_combinations:
+ if any(all(t in variation_tags for t in s)
+ for s in invalid_variation_combinations):
+ return {'return': 1, 'error': 'Invalid variation combination "{}" prepared. Invalid combinations: "{}" '.format(
+ variation_tags, invalid_variation_combinations)}
+
+ variation_tags_string = ''
+ if len(variation_tags) > 0:
+ for t in variation_tags:
+ if variation_tags_string != '':
+ variation_tags_string += ','
+
+ x = '_' + t
+ variation_tags_string += x
+
+ logging.debug(
+ recursion_spaces +
+ ' Prepared variations: {}'.format(variation_tags_string))
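+ # e.g. variation_tags ['llvm', 'version.15'] -> '_llvm,_version.15'
+ # (assumed variation names for illustration)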
+
+ # Update env and other keys if variations
+ if len(variation_tags) > 0:
+ for variation_tag in variation_tags:
+ if variation_tag.startswith('~'):
+ # ignore such tag (needed for caching only to differentiate
+ # variations)
+ continue
+
+ if variation_tag.startswith('-'):
+ # ignore such tag (needed for caching only to eliminate
+ # variations)
+ continue
+
+ variation_tag_dynamic_suffix = None
+ if variation_tag not in variations:
+ if '.' in variation_tag and variation_tag[-1] != '.':
+ variation_tag_dynamic_suffix = variation_tag[variation_tag.index(
+ ".") + 1:]
+ if not variation_tag_dynamic_suffix:
+ return {'return': 1, 'error': 'tag {} is not in variations {}'.format(
+ variation_tag, variations.keys())}
+ variation_tag = self._get_name_for_dynamic_variation_tag(
+ variation_tag)
+ if variation_tag not in variations:
+ return {'return': 1, 'error': 'tag {} is not in variations {}'.format(
+ variation_tag, variations.keys())}
+
+ variation_meta = variations[variation_tag]
+ if variation_tag_dynamic_suffix:
+ self._update_variation_meta_with_dynamic_suffix(
+ variation_meta, variation_tag_dynamic_suffix)
+
+ r = update_state_from_meta(
+ variation_meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ new_env_keys_from_meta,
+ new_state_keys_from_meta,
+ run_state,
+ i)
+ if r['return'] > 0:
+ return r
+
+ if variation_meta.get('script_name', '') != '':
+ meta['script_name'] = variation_meta['script_name']
+
+ if variation_meta.get('default_version', '') != '':
+ run_state['default_version'] = variation_meta['default_version']
+
+ if variation_meta.get(
+ 'required_disk_space', 0) > 0 and variation_tag not in required_disk_space:
+ required_disk_space[variation_tag] = variation_meta['required_disk_space']
+
+ if variation_meta.get('warning', '') != '':
+ x = variation_meta['warning']
+ if x not in warnings:
+ warnings.append(x)
+
+ adr = get_adr(variation_meta)
+ if adr:
+ self._merge_dicts_with_tags(add_deps_recursive, adr)
+
+ combined_variations = [t for t in variations if ',' in t]
+
+ combined_variations.sort(key=lambda x: x.count(','))
+ ''' By sorting based on the number of variations users can safely override
+ env and state in a larger combined variation
+ '''
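+
+ # Illustrative example (hypothetical names): if both 'a,b' and 'a,b,c'
+ # match the selected variation tags, 'a,b' is applied first, so env and
+ # state keys set by the larger combination 'a,b,c' win.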
+
+ for combined_variation in combined_variations:
+ v = combined_variation.split(",")
+ all_present = set(v).issubset(set(variation_tags))
+ if all_present:
+
+ combined_variation_meta = variations[combined_variation]
+
+ r = update_state_from_meta(
+ combined_variation_meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ new_env_keys_from_meta,
+ new_state_keys_from_meta,
+ run_state,
+ i)
+ if r['return'] > 0:
+ return r
+
+ adr = get_adr(combined_variation_meta)
+ if adr:
+ self._merge_dicts_with_tags(
+ add_deps_recursive, adr)
+
+ if combined_variation_meta.get(
+ 'script_name', '') != '':
+ meta['script_name'] = combined_variation_meta['script_name']
+
+ if combined_variation_meta.get('default_version', '') != '':
+ run_state['default_version'] = combined_variation_meta['default_version']
+
+ if combined_variation_meta.get(
+ 'required_disk_space', 0) > 0 and combined_variation not in required_disk_space:
+ required_disk_space[combined_variation] = combined_variation_meta['required_disk_space']
+
+ if combined_variation_meta.get('warning', '') != '':
+ x = combined_variation_meta['warning']
+ if x not in warnings:
+ warnings.append(x)
+
+ # Processing them again using updated deps for add_deps_recursive
+ r = update_adr_from_meta(
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ add_deps_recursive,
+ env)
+ if r['return'] > 0:
+ return r
+
+ if len(required_disk_space) > 0:
+ required_disk_space_sum_mb = sum(
+ list(required_disk_space.values()))
+
+ warnings.append(
+ 'Required disk space: {} MB'.format(required_disk_space_sum_mb))
+
+ return {'return': 0, 'variation_tags_string': variation_tags_string,
+ 'explicit_variation_tags': explicit_variation_tags, 'warnings': warnings}
+
+ ##########################################################################
+ def _update_variation_tags_from_variations(
+ self, variation_tags, variations, variation_groups, excluded_variation_tags):
+
+ import copy
+ tmp_variation_tags_static = copy.deepcopy(variation_tags)
+ for v_i in range(len(tmp_variation_tags_static)):
+ v = tmp_variation_tags_static[v_i]
+
+ if v not in variations:
+ v_static = self._get_name_for_dynamic_variation_tag(v)
+ tmp_variation_tags_static[v_i] = v_static
+
+ combined_variations = [t for t in variations if ',' in t]
+ # We support default_variations in the meta of combined_variations
+ combined_variations.sort(key=lambda x: x.count(','))
+ ''' By sorting based on the number of variations users can safely override
+ env and state in a larger combined variation
+ '''
+ tmp_combined_variations = {k: False for k in combined_variations}
+
+ # Recursively add any base variations specified
+ if len(variation_tags) > 0:
+ tmp_variations = {k: False for k in variation_tags}
+ while True:
+ for variation_name in variation_tags:
+ tag_to_append = None
+
+ # ignore the excluded variations
+ if variation_name.startswith(
+ "~") or variation_name.startswith("-"):
+ tmp_variations[variation_name] = True
+ continue
+
+ if variation_name not in variations:
+ variation_name = self._get_name_for_dynamic_variation_tag(
+ variation_name)
+
+ # base variations are automatically turned on. Only
+ # variations outside of any variation group can be added as
+ # a base_variation
+ if "base" in variations[variation_name]:
+ base_variations = variations[variation_name]["base"]
+ for base_variation in base_variations:
+ dynamic_base_variation = False
+ dynamic_base_variation_already_added = False
+ if base_variation not in variations:
+ base_variation_dynamic = self._get_name_for_dynamic_variation_tag(
+ base_variation)
+ if not base_variation_dynamic or base_variation_dynamic not in variations:
+ return {'return': 1, 'error': 'Variation "{}" specified as base variation of "{}" is not existing'.format(
+ base_variation, variation_name)}
+ else:
+ dynamic_base_variation = True
+ base_prefix = base_variation_dynamic.split(".")[
+ 0] + "."
+ for x in variation_tags:
+ if x.startswith(base_prefix):
+ dynamic_base_variation_already_added = True
+
+ if base_variation not in variation_tags and not dynamic_base_variation_already_added:
+ tag_to_append = base_variation
+
+ if tag_to_append:
+ if tag_to_append in excluded_variation_tags:
+ return {'return': 1, 'error': 'Variation "{}" specified as base variation for the variation is in the excluded list "{}" '.format(
+ tag_to_append, variation_name)}
+ variation_tags.append(tag_to_append)
+ tmp_variations[tag_to_append] = False
+
+ tag_to_append = None
+
+ # default_variations dictionary specifies the
+ # default_variation for each variation group. A default
+ # variation in a group is turned on if no other variation
+ # from that group is turned on and it is not excluded using
+ # the '-' prefix
+ r = self._get_variation_tags_from_default_variations(
+ variations[variation_name],
+ variations,
+ variation_groups,
+ tmp_variation_tags_static,
+ excluded_variation_tags)
+ if r['return'] > 0:
+ return r
+
+ variations_to_add = r['variations_to_add']
+ for t in variations_to_add:
+ tmp_variations[t] = False
+ variation_tags.append(t)
+
+ tmp_variations[variation_name] = True
+
+ for combined_variation in combined_variations:
+ if tmp_combined_variations[combined_variation]:
+ continue
+ v = combined_variation.split(",")
+ all_present = set(v).issubset(set(variation_tags))
+ if all_present:
+ combined_variation_meta = variations[combined_variation]
+ tmp_combined_variations[combined_variation] = True
+
+ r = self._get_variation_tags_from_default_variations(
+ combined_variation_meta,
+ variations,
+ variation_groups,
+ tmp_variation_tags_static,
+ excluded_variation_tags)
+ if r['return'] > 0:
+ return r
+
+ variations_to_add = r['variations_to_add']
+ for t in variations_to_add:
+ tmp_variations[t] = False
+ variation_tags.append(t)
+
+ all_base_processed = True
+ for variation_name in variation_tags:
+ if variation_name.startswith("-"):
+ continue
+ if variation_name not in variations:
+ variation_name = self._get_name_for_dynamic_variation_tag(
+ variation_name)
+ if not tmp_variations[variation_name]:
+ all_base_processed = False
+ break
+ if all_base_processed:
+ break
+ return {'return': 0}
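+
+ # Illustrative example (hypothetical meta): if variation 'cuda' declares
+ # "base": ["gpu"], then requesting '_cuda' automatically appends 'gpu'
+ # to variation_tags; the while loop above repeats until no unprocessed
+ # base variations remain.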
+
+ ##########################################################################
+ def _get_variation_tags_from_default_variations(
+ self, variation_meta, variations, variation_groups, tmp_variation_tags_static, excluded_variation_tags):
+ # default_variations dictionary specifies the default_variation for
+ # each variation group. A default variation in a group is turned on if
+ # no other variation from that group is turned on and it is not
+ # excluded using the '-' prefix
+
+ tmp_variation_tags = []
+ if "default_variations" in variation_meta:
+ default_base_variations = variation_meta["default_variations"]
+ for default_base_variation in default_base_variations:
+ tag_to_append = None
+
+ if default_base_variation not in variation_groups:
+ return {'return': 1, 'error': 'Default variation "{}" is not a valid group. Valid groups are "{}" '.format(
+ default_base_variation, variation_groups)}
+
+ unique_allowed_variations = variation_groups[default_base_variation]['variations']
+ # add the default only if none of the variations from the
+ # current group is selected and it is not being excluded with -
+ # prefix
+ if len(set(unique_allowed_variations) & set(tmp_variation_tags_static)) == 0 and default_base_variations[
+ default_base_variation] not in excluded_variation_tags and default_base_variations[default_base_variation] not in tmp_variation_tags_static:
+ tag_to_append = default_base_variations[default_base_variation]
+
+ if tag_to_append:
+ if tag_to_append not in variations:
+ variation_tag_static = self._get_name_for_dynamic_variation_tag(
+ tag_to_append)
+ if not variation_tag_static or variation_tag_static not in variations:
+ return {'return': 1, 'error': 'Invalid variation "{}" specified in default variations for the variation "{}" '.format(
+ tag_to_append, variation_meta)}
+ tmp_variation_tags.append(tag_to_append)
+
+ return {'return': 0, 'variations_to_add': tmp_variation_tags}
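+
+ # Illustrative example (hypothetical meta): default_variations
+ # {'device': 'cpu'} turns on 'cpu' only if no other variation from the
+ # 'device' group is selected and 'cpu' is not excluded via '-cpu'.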
+
+ ############################################################
+ def version(self, i):
+ """
+ Print version
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ console = i.get('out') == 'con'
+
+ version = self.__version__
+
+ if console:
+ logging.info(version)
+
+ return {'return': 0, 'version': version}
+
+ ############################################################
+
+ def search(self, i):
+ """
+ Overriding the automation search function to filter out scripts not matching the given variation tags
+
+ TBD: add input/output description
+ """
+
+ console = i.get('out') == 'con'
+
+ # Check simplified CMD: cm run script "get compiler"
+ # If artifact has spaces, treat them as tags!
+ artifact = i.get('artifact', '')
+ if ' ' in artifact: # or ',' in artifact:
+ del (i['artifact'])
+ if 'parsed_artifact' in i:
+ del (i['parsed_artifact'])
+ # Force substitute tags
+ i['tags'] = artifact.replace(' ', ',')
+
+ #######################################################################
+ # Process tags to find script(s) and separate variations
+ # (not needed to find scripts)
+ tags_string = i.get('tags', '').strip()
+
+ tags = [] if tags_string == '' else tags_string.split(',')
+
+ script_tags = []
+ variation_tags = []
+
+ for t in tags:
+ t = t.strip()
+ if t != '':
+ if t.startswith('_'):
+ tx = t[1:]
+ if tx not in variation_tags:
+ variation_tags.append(tx)
+ elif t.startswith('-_'):
+ tx = '-' + t[2:]
+ if tx not in variation_tags:
+ variation_tags.append(tx)
+ else:
+ script_tags.append(t)
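+
+ # Illustrative example: tags 'detect,compiler,_llvm,-_gcc' split into
+ # script_tags ['detect', 'compiler'] and variation_tags ['llvm', '-gcc'].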
+
+ excluded_tags = [v[1:] for v in script_tags if v.startswith("-")]
+ common = set(script_tags).intersection(set(excluded_tags))
+ if common:
+ return {
+ 'return': 1, 'error': 'There are common tags {} in both the included and excluded lists'.format(common)}
+
+ excluded_variation_tags = [v[1:]
+ for v in variation_tags if v.startswith("-")]
+ common = set(variation_tags).intersection(set(excluded_variation_tags))
+ if common:
+ return {
+ 'return': 1, 'error': 'There are common variation tags {} in both the included and excluded lists'.format(common)}
+
+ #######################################################################
+ # Find CM script(s) based on their tags to get their meta (can be more than 1)
+ # Then check if variations exists inside meta
+
+ i['tags'] = ','.join(script_tags)
+
+ i['out'] = None
+ i['common'] = True
+
+ r = super(CAutomation, self).search(i)
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ r['unfiltered_list'] = lst
+
+ found_scripts = len(lst) > 0
+
+ if found_scripts and len(variation_tags) > 0:
+ filtered = []
+
+ for script_artifact in lst:
+ meta = script_artifact.meta
+ variations = meta.get('variations', {})
+
+ matched = True
+ for t in variation_tags:
+ if t.startswith('-'):
+ t = t[1:]
+ if t in variations:
+ continue
+ matched = False
+ for s in variations:
+ if s.endswith('.#'):
+ if t.startswith(s[:-1]) and t[-1] != '.':
+ matched = True
+ break
+ if not matched:
+ break
+ if not matched:
+ continue
+
+ filtered.append(script_artifact)
+
+ if len(lst) > 0 and not filtered:
+ warning = [""]
+ for script in lst:
+ meta = script.meta
+ variations = meta.get('variations', {})
+ warning.append(
+ 'variation tags {} do not match the found script {} with variations {}\n'.format(
+ variation_tags, meta.get('alias'), variations.keys()))
+ r['warning'] = "\n".join(warning)
+
+ r['list'] = filtered
+
+ # Print filtered paths if console
+ if console:
+ for script in r['list']:
+
+ # This should not be logging since the output can be consumed by other external tools and scripts
+ # logging.info(script.path)
+ print(script.path)
+
+ # Finalize output
+ r['script_tags'] = script_tags
+ r['variation_tags'] = variation_tags
+ r['found_scripts'] = found_scripts
+
+ return r
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation (TBD)
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+
+ # Check parsed automation
+ if 'parsed_automation' not in i:
+ return {'return': 1, 'error': 'automation is not specified'}
+
+ console = i.get('out') == 'con'
+
+ # Find CM artifact(s)
+ i['out'] = None
+ r = self.search(i)
+
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+ for script_artifact in lst:
+ path = script_artifact.path
+ meta = script_artifact.meta
+ original_meta = script_artifact.original_meta
+
+ alias = meta.get('alias', '')
+ uid = meta.get('uid', '')
+ if console:
+ logging.info(path)
+ # default to a dict so that test_config.get() below is safe
+ test_config = meta.get('tests', {})
+ if test_config:
+ logging.info(test_config)
+ variations = meta.get("variations")
+ tags_string = ",".join(meta.get("tags"))
+ test_input_index = i.get('test_input_index')
+ test_input_id = i.get('test_input_id')
+ run_inputs = i.get("run_inputs", test_config.get(
+ 'run_inputs', [{"docker_os": "ubuntu", "docker_os_version": "22.04"}]))
+ if test_input_index:
+ index_plus = False
+ try:
+ if test_input_index.endswith("+"):
+ input_index = int(test_input_index[:-1])
+ index_plus = True
+ else:
+ input_index = int(test_input_index)
+ except ValueError as e:
+ print(e)
+ return {
+ 'return': 1, 'error': f'Invalid test_input_index: {test_input_index}. Must be an integer or an integer followed by a +'}
+ if input_index > len(run_inputs):
+ run_inputs = []
+ else:
+ if index_plus:
+ run_inputs = run_inputs[input_index - 1:]
+ else:
+ run_inputs = [run_inputs[input_index - 1]]
+
+ for run_input in run_inputs:
+ if test_input_id:
+ if run_input.get('id', '') != test_input_id:
+ continue
+
+ ii = {'action': 'run',
+ 'automation': 'script',
+ 'quiet': i.get('quiet'),
+ }
+ test_all_variations = run_input.get(
+ 'test-all-variations', False)
+ if test_all_variations:
+ run_variations = [
+ f"_{v}" for v in variations if variations[v].get(
+ 'group',
+ '') == '' and str(
+ variations[v].get(
+ 'exclude-in-test',
+ '')).lower() not in [
+ "1",
+ "true",
+ "yes"]]
+ else:
+ given_variations = run_input.get(
+ 'variations_list', [])
+ if given_variations:
+ v_split = []
+ run_variations = []
+ # use v_i (not i) to avoid shadowing the input dict 'i'
+ for v_i, v in enumerate(given_variations):
+ v_split = v.split(",")
+ for t in v_split:
+ if not t.startswith("_"):
+ # variations must begin with _. We
+ # support both with and without _
+ # in the meta
+ given_variations[v_i] = f"_{t}"
+ if v_split:
+ run_variations.append(
+ ",".join(v_split))
+ else:
+ # run the test without any variations
+ run_variations = [""]
+ use_docker = run_input.get('docker', False)
+ for key in run_input: # override meta with any user inputs like for docker_cm_repo
+ if i.get(key):
+ if isinstance(run_input[key], dict):
+ utils.merge_dicts({
+ 'dict1': run_input[key],
+ 'dict2': i[key],
+ 'append_lists': True,
+ 'append_unique': True
+ })
+ else:
+ run_input[key] = i[key]
+
+ ii = {**ii, **run_input}
+ i_env = ii.get('env', i.get('env', {}))
+ if use_docker:
+ ii['action'] = "docker"
+ for key in i:
+ if key.startswith("docker_"):
+ ii[key] = i[key]
+
+ if ii.get('docker_image_name', '') == '':
+ ii['docker_image_name'] = alias
+
+ for variation_tags in run_variations:
+ run_tags = f"{tags_string},{variation_tags}"
+ ii['tags'] = run_tags
+ if i_env:
+ import copy
+ ii['env'] = copy.deepcopy(i_env)
+ logging.info(ii)
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0, 'list': lst}
+
+ ############################################################
+
+ def native_run(self, i):
+ """
+ Run a native script
+
+ Args:
+ (CM input dict):
+
+ env (dict): environment
+ command (str): string
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ env = i.get('env', {})
+ cmd = i.get('command', '')
+
+ script = i.get('script', [])
+
+ # Create temporary script name
+ script_name = i.get('script_name', '')
+ if script_name == '':
+ script_name = 'tmp-native-run.'
+
+ if os.name == 'nt':
+ script_name += 'bat'
+ else:
+ script_name += 'sh'
+
+ if os.name == 'nt':
+ xcmd = 'call ' + script_name
+
+ if len(script) == 0:
+ script.append('@echo off')
+ script.append('')
+ else:
+ xcmd = 'chmod 755 ' + script_name + ' ; ./' + script_name
+
+ if len(script) == 0:
+ script.append('#!/bin/bash')
+ script.append('')
+
+ # Assemble env
+ if len(env) > 0:
+ for k in env:
+ v = env[k]
+
+ if os.name == 'nt':
+ script.append('set ' + k + '=' + v)
+ else:
+ if ' ' in v:
+ v = '"' + v + '"'
+ script.append('export ' + k + '=' + v)
+
+ script.append('')
+
+ # Add CMD
+ script.append(cmd)
+
+ # Record script
+ r = utils.save_txt(file_name=script_name, string='\n'.join(script))
+ if r['return'] > 0:
+ return r
+
+ # Run script
+ rc = os.system(xcmd)
+
+ return {'return': 0, 'return_code': rc}
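+
+ # Illustrative usage (assumed input): native_run({'command': 'echo Hi',
+ # 'env': {'MSG': 'Hello'}}) writes tmp-native-run.sh (or .bat on Windows)
+ # with the env exported, then executes it via os.system.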
+
+ ############################################################
+ def add(self, i):
+ """
+ Add CM script
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ parsed_artifact (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ (tags) (str): tags to find a CM script (CM artifact)
+
+ (script_name) (str): name of script (it will be copied to the new entry and added to the meta)
+
+ (tags) (string or list): tags to be added to meta
+
+ (new_tags) (string or list): new tags to be added to meta (the same as tags)
+
+ (json) (bool): if True, record JSON meta instead of YAML
+
+ (meta) (dict): preloaded meta
+
+ (template) (string): template to use (python)
+ (python) (bool): template=python
+ (pytorch) (bool): template=pytorch
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ import shutil
+
+ console = i.get('out') == 'con'
+
+ # Try to find script artifact by alias and/or tags
+ ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'])
+
+ parsed_artifact = i.get('parsed_artifact', [])
+
+ artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None
+ artifact_repo = parsed_artifact[1] if len(
+ parsed_artifact) > 1 else None
+
+ script_name = ''
+ if 'script_name' in i:
+ script_name = i.get('script_name', '').strip()
+ del (i['script_name'])
+
+ if script_name != '' and not os.path.isfile(script_name):
+ return {'return': 1,
+ 'error': 'file {} not found'.format(script_name)}
+
+ # Move tags from input to meta of the newly created script artifact
+ tags_list = utils.convert_tags_to_list(i)
+ if 'tags' in i:
+ del (i['tags'])
+
+ if len(tags_list) == 0:
+ if console:
+ x = input(
+ 'Please specify a combination of unique tags separated by comma for this script: ')
+ x = x.strip()
+ if x != '':
+ tags_list = x.split(',')
+
+ if len(tags_list) == 0:
+ return {
+ 'return': 1, 'error': 'you must specify a combination of unique tags separated by comma using "--new_tags"'}
+
+ # Add placeholder (use common action)
+ ii['out'] = 'con'
+ # Avoid recursion - use internal CM add function to add the script
+ # artifact
+ ii['common'] = True
+
+ # Check template path
+ template_dir = 'template'
+
+ template = i.get('template', '')
+
+ if template == '':
+ if i.get('python', False):
+ template = 'python'
+ elif i.get('pytorch', False):
+ template = 'pytorch'
+
+ if template != '':
+ template_dir += '-' + template
+
+ template_path = os.path.join(self.path, template_dir)
+
+ if not os.path.isdir(template_path):
+ return {'return': 1, 'error': 'template path {} not found'.format(
+ template_path)}
+
+ # Check if preloaded meta exists
+ meta = {
+ 'cache': False
+ # 20240127: Grigori commented that because newly created script meta looks ugly
+ # 'new_env_keys':[],
+ # 'new_state_keys':[],
+ # 'input_mapping':{},
+ # 'docker_input_mapping':{},
+ # 'deps':[],
+ # 'prehook_deps':[],
+ # 'posthook_deps':[],
+ # 'post_deps':[],
+ # 'versions':{},
+ # 'variations':{},
+ # 'input_description':{}
+ }
+
+ fmeta = os.path.join(template_path, self.cmind.cfg['file_cmeta'])
+
+ r = utils.load_yaml_and_json(fmeta)
+ if r['return'] == 0:
+ utils.merge_dicts({'dict1': meta,
+ 'dict2': r['meta'],
+ 'append_lists': True,
+ 'append_unique': True})
+
+ # Check meta from CMD
+ xmeta = i.get('meta', {})
+
+ if len(xmeta) > 0:
+ utils.merge_dicts({'dict1': meta, 'dict2': xmeta,
+ 'append_lists': True, 'append_unique': True})
+
+ meta['automation_alias'] = self.meta['alias']
+ meta['automation_uid'] = self.meta['uid']
+ meta['tags'] = tags_list
+
+ script_name_base = script_name
+ script_name_ext = ''
+ if script_name != '':
+ # separate name and extension
+ j = script_name.rfind('.')
+ if j >= 0:
+ script_name_base = script_name[:j]
+ script_name_ext = script_name[j:]
+
+ meta['script_name'] = script_name_base
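+
+ # Illustrative example: script_name 'process.sh' yields
+ # script_name_base 'process' and script_name_ext '.sh'.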
+
+ ii['meta'] = meta
+ ii['action'] = 'add'
+
+ use_yaml = not i.get('json', False)
+
+ if use_yaml:
+ ii['yaml'] = True
+
+ ii['automation'] = 'script,5b4e0237da074764'
+
+ for k in ['parsed_automation', 'parsed_artifact']:
+ if k in ii:
+ del ii[k]
+
+ if artifact_repo is not None:
+ ii['artifact'] = utils.assemble_cm_object2(
+ artifact_repo) + ':' + utils.assemble_cm_object2(artifact_obj)
+
+ r_obj = self.cmind.access(ii)
+ if r_obj['return'] > 0:
+ return r_obj
+
+ new_script_path = r_obj['path']
+
+ if console:
+ logging.info('Created script in {}'.format(new_script_path))
+
+ # Copy files from template (only if exist)
+ files = [
+ (template_path, 'README-extra.md', ''),
+ (template_path, 'customize.py', ''),
+ (template_path, 'main.py', ''),
+ (template_path, 'requirements.txt', ''),
+ (template_path, 'install_deps.bat', ''),
+ (template_path, 'install_deps.sh', ''),
+ (template_path, 'plot.bat', ''),
+ (template_path, 'plot.sh', ''),
+ (template_path, 'analyze.bat', ''),
+ (template_path, 'analyze.sh', ''),
+ (template_path, 'validate.bat', ''),
+ (template_path, 'validate.sh', '')
+ ]
+
+ if script_name == '':
+ files += [(template_path, 'run.bat', ''),
+ (template_path, 'run.sh', '')]
+ else:
+ if script_name_ext == '.bat':
+ files += [(template_path, 'run.sh', script_name_base + '.sh')]
+ files += [('', script_name, script_name)]
+
+ else:
+ files += [(template_path, 'run.bat',
+ script_name_base + '.bat')]
+ files += [('', script_name, script_name_base + '.sh')]
+
+ for x in files:
+ path = x[0]
+ f1 = x[1]
+ f2 = x[2]
+
+ if f2 == '':
+ f2 = f1
+
+ if path != '':
+ f1 = os.path.join(path, f1)
+
+ if os.path.isfile(f1):
+ f2 = os.path.join(new_script_path, f2)
+
+ if console:
+ logging.info(' * Copying {} to {}'.format(f1, f2))
+
+ shutil.copyfile(f1, f2)
+
+ return r_obj
+
+ ##########################################################################
+ def _get_name_for_dynamic_variation_tag(script, variation_tag):
+ '''
+ Returns the variation name in meta for the dynamic_variation_tag
+ '''
+ if "." not in variation_tag or variation_tag[-1] == ".":
+ return None
+ return variation_tag[:variation_tag.index(".") + 1] + "#"
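+
+ # Illustrative example: 'batch_size.16' maps to the meta name
+ # 'batch_size.#', while 'batch_size' or 'batch_size.' return None
+ # (not a valid dynamic variation tag).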
+
+ ##########################################################################
+
+ def _update_variation_meta_with_dynamic_suffix(
+ script, variation_meta, variation_tag_dynamic_suffix):
+ '''
+ Updates the variation meta with dynamic suffix
+ '''
+ for key in variation_meta:
+ value = variation_meta[key]
+
+ if isinstance(value, list): # deps,pre_deps...
+ for item in value:
+ if isinstance(item, dict):
+ for item_key in item:
+ item_value = item[item_key]
+ if isinstance(
+ item_value, dict): # env,default_env inside deps
+ for item_key2 in item_value:
+ item_value[item_key2] = item_value[item_key2].replace(
+ "#", variation_tag_dynamic_suffix)
+ elif isinstance(item_value, list): # names for example
+ for i, l_item in enumerate(item_value):
+ if isinstance(l_item, str):
+ item_value[i] = l_item.replace(
+ "#", variation_tag_dynamic_suffix)
+ else:
+ item[item_key] = item[item_key].replace(
+ "#", variation_tag_dynamic_suffix)
+
+ elif isinstance(value, dict): # add_deps, env, ..
+ for item in value:
+ item_value = value[item]
+ if isinstance(item_value, dict): # deps
+ for item_key in item_value:
+ item_value2 = item_value[item_key]
+ if isinstance(
+ item_value2, dict): # env,default_env inside deps
+ for item_key2 in item_value2:
+ item_value2[item_key2] = item_value2[item_key2].replace(
+ "#", variation_tag_dynamic_suffix)
+ else:
+ item_value[item_key] = item_value[item_key].replace(
+ "#", variation_tag_dynamic_suffix)
+ else:
+ if isinstance(item_value, list): # lists inside env...
+ for i, l_item in enumerate(item_value):
+ if isinstance(l_item, str):
+ item_value[i] = l_item.replace(
+ "#", variation_tag_dynamic_suffix)
+ else:
+ value[item] = value[item].replace(
+ "#", variation_tag_dynamic_suffix)
+
+ else: # scalar value
+ pass # no dynamic update for now
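+
+ # Illustrative example (hypothetical meta): for variation 'batch_size.#'
+ # requested as '_batch_size.16', an env entry {'CM_BATCH_SIZE': '#'}
+ # in the variation meta becomes {'CM_BATCH_SIZE': '16'}.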
+
+ ##########################################################################
+
+ def _get_variations_with_aliases(script, variation_tags, variations):
+ '''
+ Automatically turn on variation tags which are aliased by any given tag
+ '''
+ import copy
+ tmp_variation_tags = copy.deepcopy(variation_tags)
+
+ excluded_variations = [k[1:]
+ for k in variation_tags if k.startswith("-")]
+ for i, e in enumerate(excluded_variations):
+ if e not in variations:
+ dynamic_tag = script._get_name_for_dynamic_variation_tag(e)
+ if dynamic_tag and dynamic_tag in variations:
+ excluded_variations[i] = dynamic_tag
+
+ for k in variation_tags:
+ if k.startswith("-"):
+ continue
+ if k in variations:
+ variation = variations[k]
+ else:
+ variation = variations[script._get_name_for_dynamic_variation_tag(
+ k)]
+ if 'alias' in variation:
+
+ if variation['alias'] in excluded_variations:
+ return {'return': 1, 'error': 'Alias "{}" specified for the variation "{}" is conflicting with the excluded variation "-{}" '.format(
+ variation['alias'], k, variation['alias'])}
+
+ if variation['alias'] not in variations:
+ return {'return': 1, 'error': 'Alias "{}" specified for the variation "{}" is not existing '.format(
+ variation['alias'], k)}
+
+ if 'group' in variation:
+ return {
+ 'return': 1, 'error': 'Incompatible combinations: (alias, group) specified for the variation "{}" '.format(k)}
+
+ if 'default' in variation:
+ return {
+ 'return': 1, 'error': 'Incompatible combinations: (alias, default) specified for the variation "{}" '.format(k)}
+
+ if variation['alias'] not in tmp_variation_tags:
+ tmp_variation_tags.append(variation['alias'])
+
+ return {'return': 0, 'variation_tags': tmp_variation_tags,
+ 'excluded_variation_tags': excluded_variations}
+
+ ##########################################################################
+
+ def _get_variation_groups(script, variations):
+
+ groups = {}
+
+ for k in variations:
+ variation = variations[k]
+ if not variation:
+ continue
+ if 'group' in variation:
+ if variation['group'] not in groups:
+ groups[variation['group']] = {}
+ groups[variation['group']]['variations'] = []
+ groups[variation['group']]['variations'].append(k)
+ if 'default' in variation:
+ if 'default' in groups[variation['group']]:
+ return {'return': 1, 'error': 'Multiple defaults specified for the variation group "{}": "{},{}" '.format(
+ variation['group'], k, groups[variation['group']]['default'])}
+ groups[variation['group']]['default'] = k
+
+ return {'return': 0, 'variation_groups': groups}
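+
+ # Illustrative example (hypothetical meta): variations
+ # {'cpu': {'group': 'device', 'default': True}, 'cuda': {'group': 'device'}}
+ # yield {'device': {'variations': ['cpu', 'cuda'], 'default': 'cpu'}}.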
+
+ ##########################################################################
+
+ def _process_variation_tags_in_groups(
+ script, variation_tags, groups, excluded_variations, variations):
+ import copy
+ tmp_variation_tags = copy.deepcopy(variation_tags)
+ tmp_variation_tags_static = copy.deepcopy(variation_tags)
+
+ for v_i in range(len(tmp_variation_tags_static)):
+ v = tmp_variation_tags_static[v_i]
+
+ if v not in variations:
+ v_static = script._get_name_for_dynamic_variation_tag(v)
+ tmp_variation_tags_static[v_i] = v_static
+
+ for k in groups:
+ group = groups[k]
+ unique_allowed_variations = group['variations']
+
+ if len(set(unique_allowed_variations) &
+ set(tmp_variation_tags_static)) > 1:
+ return {'return': 1, 'error': 'Multiple variation tags selected for the variation group "{}": {} '.format(
+ k, str(set(unique_allowed_variations) & set(tmp_variation_tags_static)))}
+ if len(set(unique_allowed_variations) &
+ set(tmp_variation_tags_static)) == 0:
+ if 'default' in group and group['default'] not in excluded_variations:
+ tmp_variation_tags.append(group['default'])
+
+ return {'return': 0, 'variation_tags': tmp_variation_tags}
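+
+ # Illustrative example (continuing the hypothetical 'device' group):
+ # with variation_tags ['onnxruntime'], the group default 'cpu' is
+ # appended; selecting both 'cpu' and 'cuda' triggers the error above.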
+
+ ##########################################################################
+
+ def _call_run_deps(script, deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state,
+ add_deps_recursive, recursion_spaces, remembered_selections, variation_tags_string, found_cached, debug_script_tags='',
+ verbose=False, show_time=False, extra_recursion_spaces=' ', run_state={'deps': [], 'fake_deps': [], 'parent': None}):
+ if len(deps) == 0:
+ return {'return': 0}
+
+ # Check chain of post hook dependencies on other CM scripts
+ import copy
+
+ # Get local env keys
+ local_env_keys = copy.deepcopy(local_env_keys)
+
+ if len(local_env_keys_from_meta) > 0:
+ local_env_keys += local_env_keys_from_meta
+
+ r = script._run_deps(deps, local_env_keys, env, state, const, const_state, add_deps_recursive, recursion_spaces,
+ remembered_selections, variation_tags_string, found_cached, debug_script_tags,
+ verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, add_deps_recursive, recursion_spaces,
+ remembered_selections, variation_tags_string='', from_cache=False, debug_script_tags='',
+ verbose=False, show_time=False, extra_recursion_spaces=' ', run_state={'deps': [], 'fake_deps': [], 'parent': None}):
+ """
+ Runs all enabled dependencies and passes them the env minus the local env
+ """
+
+ if len(deps) > 0:
+ # Preserve local env
+ tmp_env = {}
+
+ variation_groups = run_state.get('variation_groups')
+
+ for d in deps:
+
+ if not d.get('tags'):
+ continue
+
+ if is_dep_tobe_skipped(d, env):
+ continue
+
+ if from_cache and not d.get("dynamic", None):
+ continue
+
+ if d.get('env'):
+ # to update env local to a dependency
+ r = update_env_with_values(d['env'], False, env)
+ if r['return'] > 0:
+ return r
+
+ update_tags_from_env_with_prefix = d.get(
+ "update_tags_from_env_with_prefix", {})
+ for t in update_tags_from_env_with_prefix:
+ for key in update_tags_from_env_with_prefix[t]:
+ if str(d.get('env', {}).get(key, '')).strip() != '':
+ d['tags'] += "," + t + str(d.get('env')[key])
+ elif str(env.get(key, '')).strip() != '':
+ d['tags'] += "," + t + str(env[key])
+
+ for key in clean_env_keys_deps:
+ if '?' in key or '*' in key:
+ import fnmatch
+ for kk in list(env.keys()):
+ if fnmatch.fnmatch(kk, key):
+ tmp_env[kk] = env[kk]
+ del (env[kk])
+ elif key in env:
+ tmp_env[key] = env[key]
+ del (env[key])
+
+ import re
+ for key in list(env.keys()):
+ value = env[key]
+ tmp_values = re.findall(r'<<<(.*?)>>>', str(value))
+ if not tmp_values:
+ continue
+ tmp_env[key] = env[key]
+ del (env[key])
+
+ force_env_keys_deps = d.get("force_env_keys", [])
+ for key in force_env_keys_deps:
+ if '?' in key or '*' in key:
+ import fnmatch
+ for kk in list(tmp_env.keys()):
+ if fnmatch.fnmatch(kk, key):
+ env[kk] = tmp_env[kk]
+ elif key in tmp_env:
+ env[key] = tmp_env[key]
+
+ if d.get("reuse_version", False):
+ for k in tmp_env:
+ if k.startswith('CM_VERSION'):
+ env[k] = tmp_env[k]
+
+ update_tags_from_env = d.get("update_tags_from_env", [])
+ for t in update_tags_from_env:
+ if env.get(t, '').strip() != '':
+ d['tags'] += "," + env[t]
+
+ inherit_variation_tags = d.get("inherit_variation_tags", False)
+ skip_inherit_variation_groups = d.get(
+ "skip_inherit_variation_groups", [])
+ variation_tags_to_be_skipped = []
+ if inherit_variation_tags:
+ if skip_inherit_variation_groups: # skips inheriting variations belonging to given groups
+ for group in variation_groups:
+ if group in skip_inherit_variation_groups:
+ variation_tags_to_be_skipped += variation_groups[group]['variations']
+
+ variation_tags = variation_tags_string.split(",")
+ variation_tags = [x for x in variation_tags if not x.startswith(
+ "_") or x[1:] not in set(variation_tags_to_be_skipped)]
+
+ # handle group in case of dynamic variations
+ for t_variation in variation_tags_to_be_skipped:
+ if t_variation.endswith(".#"):
+ beg = t_variation[:-1]
+ for m_tag in variation_tags:
+ if m_tag.startswith("_" + beg):
+ variation_tags.remove(m_tag)
+
+ deps_tags = d['tags'].split(",")
+ for tag in deps_tags:
+ if tag.startswith("-_") or tag.startswith("_-"):
+ variation_tag = "_" + tag[2:]
+ if variation_tag in variation_tags:
+ variation_tags.remove(variation_tag)
+ new_variation_tags_string = ",".join(variation_tags)
+ # deps should have non-empty tags
+ d['tags'] += "," + new_variation_tags_string
+
+ if run_state:
+ run_state['deps'].append(d['tags'])
+
+ if not run_state.get('fake_deps'):
+ import copy
+ if not run_state:
+ run_state_copy = {}
+ else:
+ run_state_copy = copy.deepcopy(run_state)
+ run_state_copy['deps'] = []
+
+ run_state_copy['parent'] = run_state['script_id']
+
+ if len(run_state['script_variation_tags']) > 0:
+ run_state_copy['parent'] += " ( " + ',_'.join(
+ run_state['script_variation_tags']) + " )"
+
+ # Run collective script via CM API:
+ # Not very efficient but allows logging - can be optimized
+ # later
+
+ ii = {
+ 'action': 'run',
+ 'automation': utils.assemble_cm_object(self.meta['alias'], self.meta['uid']),
+ 'recursion_spaces': recursion_spaces, # + extra_recursion_spaces,
+ 'recursion': True,
+ 'remembered_selections': remembered_selections,
+ 'env': env,
+ 'state': state,
+ 'const': copy.deepcopy(const),
+ 'const_state': copy.deepcopy(const_state),
+ 'add_deps_recursive': add_deps_recursive,
+ 'debug_script_tags': debug_script_tags,
+ 'verbose': verbose,
+ 'silent': run_state.get('tmp_silent', False),
+ 'time': show_time,
+ 'run_state': run_state_copy
+
+ }
+
+ for key in ["env", "state", "const", "const_state"]:
+ ii['local_' + key] = d.get(key, {})
+ if d.get(key):
+ d[key] = {}
+
+ utils.merge_dicts(
+ {'dict1': ii, 'dict2': d, 'append_lists': True, 'append_unique': True})
+
+ r = self.cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ run_state['version_info'] = run_state_copy.get(
+ 'version_info')
+
+ # Restore local env
+ env.update(tmp_env)
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ # Update env/state with const
+ env.update(const)
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ return {'return': 0}
+
+ ##########################################################################
+ def _merge_dicts_with_tags(self, dict1, dict2):
+ """
+ Merges two dictionaries and appends any tag strings in them
+ """
+ if dict1 == dict2:
+ return {'return': 0}
+ for dep in dict1:
+ if 'tags' in dict1[dep]:
+ dict1[dep]['tags_list'] = utils.convert_tags_to_list(
+ dict1[dep])
+ for dep in dict2:
+ if 'tags' in dict2[dep]:
+ dict2[dep]['tags_list'] = utils.convert_tags_to_list(
+ dict2[dep])
+ utils.merge_dicts({'dict1': dict1, 'dict2': dict2,
+ 'append_lists': True, 'append_unique': True})
+ for dep in dict1:
+ if 'tags_list' in dict1[dep]:
+ dict1[dep]['tags'] = ",".join(dict1[dep]['tags_list'])
+ del (dict1[dep]['tags_list'])
+ for dep in dict2:
+ if 'tags_list' in dict2[dep]:
+ del (dict2[dep]['tags_list'])
+
+ ##########################################################################
+ def _get_readme(self, cmd_parts, run_state):
+ """
+ Outputs a Markdown README file listing the CM run commands for the dependencies
+ """
+
+ deps = run_state['deps']
+
+ version_info = run_state.get('version_info', [])
+ version_info_dict = {}
+
+ for v in version_info:
+ k = list(v.keys())[0]
+ version_info_dict[k] = v[k]
+
+ content = ''
+
+ content += """
+*This README was automatically generated by the [CM framework](https://github.com/mlcommons/ck).*
+
+## Install CM
+
+```bash
+pip install cmind -U
+```
+
+Check [this readme](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+with more details about installing CM and dependencies across different platforms
+(Ubuntu, MacOS, Windows, RHEL, ...).
+
+## Install CM automation repositories
+
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+"""
+
+ current_cm_repo = run_state['script_repo_alias']
+ if current_cm_repo not in ['mlcommons@ck', 'mlcommons@cm4mlops']:
+ content += '\ncm pull repo ' + \
+ run_state['script_repo_alias'] + '\n'
+
+ content += """```
+
+## Run CM script
+
+```bash
+"""
+
+ cmd = "cm run script "
+
+ for cmd_part in cmd_parts:
+ x = '"' if ' ' in cmd_part and not cmd_part.startswith('-') else ''
+ cmd = cmd + " " + x + cmd_part + x
+
+ content += cmd + '\n'
+
+ content += """```
+
+## Run individual CM scripts to customize dependencies (optional)
+
+"""
+ deps_ = ''
+
+ for dep_tags in deps:
+
+ xversion = ''
+ version = version_info_dict.get(dep_tags, {}).get('version', '')
+ if version != '':
+ xversion = ' --version={}\n'.format(version)
+
+ content += "```bash\n"
+ content += "cm run script --tags=" + \
+ dep_tags + "{}\n".format(xversion)
+ content += "```\n\n"
+
+ return content
+
+ ##########################################################################
+ def _get_docker_container(self, cmd_parts, run_state):
+ """
+ Outputs the CM commands to set up a Docker container for the script (prototype)
+ """
+
+ deps = run_state['deps']
+
+ version_info = run_state.get('version_info', [])
+ version_info_dict = {}
+
+ for v in version_info:
+ k = list(v.keys())[0]
+ version_info_dict[k] = v[k]
+
+ content = ''
+
+ content += """
+
+# The following CM commands were automatically generated (prototype)
+
+cm pull repo mlcommons@cm4mlops --checkout=dev
+
+"""
+ current_cm_repo = run_state['script_repo_alias']
+ if current_cm_repo not in ['mlcommons@ck', 'mlcommons@cm4mlops']:
+ content += '\ncm pull repo ' + \
+ run_state['script_repo_alias'] + '\n\n'
+
+ deps_ = ''
+
+ for dep_tags in deps:
+
+ xversion = ''
+ version = version_info_dict.get(dep_tags, {}).get('version', '')
+ if version != '':
+ xversion = ' --version={}\n'.format(version)
+
+ content += "# cm run script --tags=" + \
+ dep_tags + "{}\n\n".format(xversion)
+
+ cmd = "cm run script "
+
+ for cmd_part in cmd_parts:
+ x = '"' if ' ' in cmd_part and not cmd_part.startswith('-') else ''
+ cmd = cmd + " " + x + cmd_part + x
+
+ content += cmd + '\n'
+
+ return content
+
+ ##########################################################################
+
+ def _print_versions(self, run_state):
+ """
+ Print versions of dependencies in a nice format
+ """
+
+ version_info = run_state.get('version_info', [])
+
+ logging.info('=========================')
+ logging.info('Versions of dependencies:')
+ for v in version_info:
+ k = list(v.keys())[0]
+ version_info_dict = v[k]
+
+ version = version_info_dict.get('version', '')
+
+ if version != '':
+ logging.info('* {}: {}'.format(k, version))
+
+ logging.info('=========================')
+
+ return {}
+
+ ##########################################################################
+ def _markdown_cmd(self, cmd):
+ """
+ Returns a CM command in markdown format
+ """
+
+ return '```bash\n ' + cmd + ' \n ```'
+
+ ##########################################################################
+
+ def _print_deps(self, deps):
+ """
+ Prints the CM run commands for the list of CM script dependencies
+ """
+
+ print_deps_data = []
+ run_cmds = self._get_deps_run_cmds(deps)
+ for cmd in run_cmds:
+ print_deps_data.append(cmd)
+ logging.info(cmd)
+
+ return print_deps_data
+
+ ##########################################################################
+
+ def _get_deps_run_cmds(self, deps):
+ """
+ Returns the CM run commands for the list of CM script dependencies
+ """
+
+ run_cmds = []
+
+ for dep_tags in deps:
+ run_cmds.append("cm run script --tags=" + dep_tags)
+
+ return run_cmds
+
+ ##########################################################################
+
+ def run_native_script(self, i):
+ """
+ Run native script in a CM script entry
+ (wrapper around "prepare_and_run_script_with_postprocessing" function)
+
+ Args:
+ (dict):
+
+ run_script_input (dict): saved input for "prepare_and_run_script_with_postprocessing" function
+ env (dict): the latest environment for the script
+ script_name (str): native script name
+
+ Returns:
+ (dict): Output from "prepare_and_run_script_with_postprocessing" function
+
+
+ """
+
+ import copy
+
+ run_script_input = i['run_script_input']
+ script_name = i['script_name']
+ env = i.get('env', '')
+ detect_version = i.get('detect_version', '')
+
+ if detect_version:
+ postprocess = "detect_version"
+ else:
+ postprocess = ""
+
+ # Create and work on a copy to avoid contamination
+ env_copy = copy.deepcopy(run_script_input.get('env', {}))
+ run_script_input_state_copy = copy.deepcopy(
+ run_script_input.get('state', {}))
+ script_name_copy = run_script_input.get('script_name', '')
+
+ run_script_input['script_name'] = script_name
+ run_script_input['env'] = env
+
+ r = prepare_and_run_script_with_postprocessing(
+ run_script_input, postprocess=postprocess)
+
+ env_tmp = copy.deepcopy(run_script_input['env'])
+ r['env_tmp'] = env_tmp
+
+ run_script_input['state'] = run_script_input_state_copy
+ run_script_input['env'] = env_copy
+ run_script_input['script_name'] = script_name_copy
+
+ return r
+
+ ##########################################################################
+ def find_file_in_paths(self, i):
+ """
+ Find file name in a list of paths
+
+ Args:
+ (CM input dict):
+
+ paths (list): list of paths
+ file_name (str): filename pattern to find
+ (select) (bool): if True and more than 1 path found, select
+ (select_default) (bool): if True, select the default one
+ (recursion_spaces) (str): add space to print
+ (run_script_input) (dict): prepared dict to run script and detect version
+
+ (detect_version) (bool): if True, attempt to detect version
+ (env_path) (str): env key to pass path to the script to detect version
+ (run_script_input) (dict): use this input to run script to detect version
+ (env) (dict): env to check/force version
+
+ (hook) (func): call this func to skip some artifacts
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ (found_files) (list): paths to files when found
+
+ """
+ import copy
+
+ paths = i['paths']
+ select = i.get('select', False)
+ select_default = i.get('select_default', False)
+ recursion_spaces = i.get('recursion_spaces', '')
+
+ hook = i.get('hook', None)
+
+ verbose = i.get('verbose', False)
+ if not verbose:
+ verbose = i.get('v', False)
+
+ file_name = i.get('file_name', '')
+ file_name_re = i.get('file_name_re', '')
+ file_is_re = False
+
+ if file_name_re != '':
+ file_name = file_name_re
+ file_is_re = True
+
+ if file_name == '':
+ raise Exception(
+ 'file_name or file_name_re not specified in find_artifact')
+
+ found_files = []
+
+ import glob
+ import re
+
+ for path in paths:
+ # It may happen that the path is stored in a variable but no longer exists
+ if os.path.isdir(path):
+ if file_is_re:
+ file_list = [
+ os.path.join(
+ path,
+ f) for f in os.listdir(path) if re.match(
+ file_name,
+ f)]
+
+ for f in file_list:
+ duplicate = False
+ for existing in found_files:
+ if os.path.samefile(existing, f):
+ duplicate = True
+ break
+ if not duplicate:
+ skip = False
+ if hook is not None:
+ r = hook({'file': f})
+ if r['return'] > 0:
+ return r
+ skip = r['skip']
+ if not skip:
+ found_files.append(f)
+
+ else:
+ path_to_file = os.path.join(path, file_name)
+
+ file_pattern_suffixes = [
+ "",
+ ".[0-9]",
+ ".[0-9][0-9]",
+ "-[0-9]",
+ "-[0-9][0-9]",
+ "[0-9]",
+ "[0-9][0-9]",
+ "[0-9].[0-9]",
+ "[0-9][0-9].[0-9]",
+ "[0-9][0-9].[0-9][0-9]"
+ ]
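+
+ # Illustrative example: for file_name 'gcc', these patterns also match
+ # versioned neighbours such as 'gcc-12', 'gcc12', 'gcc.12' or 'gcc12.3'.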
+
+ for suff in file_pattern_suffixes:
+ file_list = glob.glob(path_to_file + suff)
+ for f in file_list:
+ duplicate = False
+
+ for existing in found_files:
+ try:
+ if os.path.samefile(existing, f):
+ duplicate = True
+ break
+ except Exception as e:
+ # This function fails on Windows sometimes
+ # because some files can't be accessed
+ pass
+
+ if not duplicate:
+ skip = False
+ if hook is not None:
+ r = hook({'file': f})
+ if r['return'] > 0:
+ return r
+ skip = r['skip']
+ if not skip:
+ found_files.append(f)
+
+ if select:
+ # Check and prune versions
+ if i.get('detect_version', False):
+ found_paths_with_good_version = []
+ found_files_with_good_version = []
+
+ env = i.get('env', {})
+
+ run_script_input = i['run_script_input']
+ env_path_key = i['env_path_key']
+
+ version = env.get('CM_VERSION', '')
+ version_min = env.get('CM_VERSION_MIN', '')
+ version_max = env.get('CM_VERSION_MAX', '')
+
+ x = ''
+
+ if version != '':
+ x += ' == {}'.format(version)
+ if version_min != '':
+ x += ' >= {}'.format(version_min)
+ if version_max != '':
+ x += ' <= {}'.format(version_max)
+
+ if x != '':
+ logging.info(
+ recursion_spaces +
+ ' - Searching for versions: {}'.format(x))
+
+ new_recursion_spaces = recursion_spaces + ' '
+
+ for path_to_file in found_files:
+ logging.info(recursion_spaces + ' * ' + path_to_file)
+
+ run_script_input['env'] = env
+ run_script_input['env'][env_path_key] = path_to_file
+ run_script_input['recursion_spaces'] = new_recursion_spaces
+
+ rx = prepare_and_run_script_with_postprocessing(
+ run_script_input, postprocess="detect_version")
+
+ run_script_input['recursion_spaces'] = recursion_spaces
+
+ if rx['return'] > 0:
+ if rx['return'] != 2:
+ return rx
+ else:
+ # Version was detected
+
+ detected_version = rx.get('version', '')
+
+ if detected_version != '':
+ if detected_version == -1:
+ logging.info(
+ recursion_spaces + ' SKIPPED due to incompatibility ...')
+ else:
+ ry = check_version_constraints({'detected_version': detected_version,
+ 'version': version,
+ 'version_min': version_min,
+ 'version_max': version_max,
+ 'cmind': self.cmind})
+ if ry['return'] > 0:
+ return ry
+
+ if not ry['skip']:
+ found_files_with_good_version.append(
+ path_to_file)
+ else:
+ logging.info(
+ recursion_spaces + ' SKIPPED due to version constraints ...')
+
+ found_files = found_files_with_good_version
+
+ # Continue with selection
+ if len(found_files) > 1:
+ if select_default:
+ selection = 0
+ else:
+ # Select 1 and proceed
+ logging.info(
+ recursion_spaces +
+ ' - More than 1 path found:')
+ num = 0
+
+ for file in found_files:
+ logging.info(
+ recursion_spaces +
+ ' {}) {}'.format(
+ num,
+ file))
+ num += 1
+ x = input(recursion_spaces +
+ ' Make your selection or press Enter for 0: ')
+
+ x = x.strip()
+ if x == '':
+ x = '0'
+
+ selection = int(x)
+
+ if selection < 0 or selection >= num:
+ selection = 0
+ logging.info(
+ recursion_spaces +
+ ' Selected {}: {}'.format(
+ selection,
+ found_files[selection]))
+
+ found_files = [found_files[selection]]
+
+ return {'return': 0, 'found_files': found_files}
+
+ ##########################################################################
+ def detect_version_using_script(self, i):
+ """
+ Detect version using script
+
+ Args:
+ (CM input dict):
+
+ (recursion_spaces) (str): add space to print
+
+ run_script_input (dict): use this input to run script to detect version
+ (env) (dict): env to check/force version
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ 16 if not detected
+ * (error) (str): error string if return>0
+
+ (detected_version) (str): detected version
+
+ """
+ recursion_spaces = i.get('recursion_spaces', '')
+
+ import copy
+
+ detected = False
+
+ env = i.get('env', {})
+
+ run_script_input = i['run_script_input']
+
+ version = env.get('CM_VERSION', '')
+ version_min = env.get('CM_VERSION_MIN', '')
+ version_max = env.get('CM_VERSION_MAX', '')
+
+ x = ''
+
+ if version != '':
+ x += ' == {}'.format(version)
+ if version_min != '':
+ x += ' >= {}'.format(version_min)
+ if version_max != '':
+ x += ' <= {}'.format(version_max)
+
+ if x != '':
+ logging.info(
+ recursion_spaces +
+ ' - Searching for versions: {}'.format(x))
+
+ new_recursion_spaces = recursion_spaces + ' '
+
+ run_script_input['recursion_spaces'] = new_recursion_spaces
+ run_script_input['env'] = env
+
+ # Prepare run script
+ rx = prepare_and_run_script_with_postprocessing(
+ run_script_input, postprocess="detect_version")
+
+ run_script_input['recursion_spaces'] = recursion_spaces
+
+ if rx['return'] == 0:
+ # Version was detected
+ detected_version = rx.get('version', '')
+
+ if detected_version != '':
+ ry = check_version_constraints({'detected_version': detected_version,
+ 'version': version,
+ 'version_min': version_min,
+ 'version_max': version_max,
+ 'cmind': self.cmind})
+ if ry['return'] > 0:
+ return ry
+
+ if not ry['skip']:
+ return {'return': 0, 'detected_version': detected_version}
+
+ return {'return': 16, 'error': 'version was not detected'}
+
+ ##########################################################################
+ def find_artifact(self, i):
+ """
+ Find some artifact (file) by name
+
+ Args:
+ (CM input dict):
+
+ file_name (str): filename to find
+
+ env (dict): global env
+ os_info (dict): OS info
+
+ (detect_version) (bool): if True, attempt to detect version
+ (env_path) (str): env key to pass path to the script to detect version
+ (run_script_input) (dict): use this input to run script to detect version
+
+ (default_path_env_key) (str): check in default paths from global env
+ (PATH, PYTHONPATH, LD_LIBRARY_PATH ...)
+
+ (recursion_spaces) (str): add space to print
+
+ (hook) (func): call this func to skip some artifacts
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ error = 16 if artifact not found but no problem
+
+ found_path (list): found path to an artifact
+ full_path (str): full path to a found artifact
+ default_path_list (list): list of default paths
+
+ """
+
+ import copy
+
+ file_name = i['file_name']
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ env_path_key = i.get('env_path_key', '')
+
+ run_script_input = i.get('run_script_input', {})
+ extra_paths = i.get('extra_paths', {})
+
+ # Create and work on a copy to avoid contamination
+ env_copy = copy.deepcopy(env)
+ run_script_input_state_copy = copy.deepcopy(
+ run_script_input.get('state', {}))
+
+ default_path_env_key = i.get('default_path_env_key', '')
+ recursion_spaces = i.get('recursion_spaces', '')
+
+ hook = i.get('hook', None)
+
+ # Check if forced to search in a specific path or multiple paths
+ # separated by OS var separator (usually : or ;)
+ path = env.get('CM_TMP_PATH', '')
+
+ if path != '' and env.get(
+ 'CM_TMP_PATH_IGNORE_NON_EXISTANT', '') != 'yes':
+ # Can be a list of paths
+ path_list_tmp = path.split(os_info['env_separator'])
+ for path_tmp in path_list_tmp:
+ if path_tmp.strip() != '' and not os.path.isdir(path_tmp):
+ return {'return': 1,
+ 'error': 'path {} doesn\'t exist'.format(path_tmp)}
+
+ # Check if forced path and file name from --input (CM_INPUT - local env
+ # - will not be visible for higher-level script)
+ forced_file = env.get('CM_INPUT', '').strip()
+ if forced_file != '':
+ if not os.path.isfile(forced_file):
+ return {'return': 1,
+ 'error': 'file {} doesn\'t exist'.format(forced_file)}
+
+ file_name = os.path.basename(forced_file)
+ path = os.path.dirname(forced_file)
+
+ default_path_list = self.get_default_path_list(i)
+ # [] if default_path_env_key == '' else \
+ # os.environ.get(default_path_env_key,'').split(os_info['env_separator'])
+
+ if path == '':
+ path_list_tmp = default_path_list
+ else:
+ logging.info(
+ recursion_spaces +
+ ' # Requested paths: {}'.format(path))
+ path_list_tmp = path.split(os_info['env_separator'])
+
+ # Check soft links
+ path_list_tmp2 = []
+ for path_tmp in path_list_tmp:
+ # path_tmp_abs = os.path.realpath(os.path.join(path_tmp, file_name))
+ # GF: I commented out the above code because it doesn't work correctly
+ # for virtual python - it un-softlinks the virtual python and picks up
+ # the native one from /usr/bin, thus making workflows behave incorrectly
+ path_tmp_abs = os.path.join(path_tmp, file_name)
+
+ if path_tmp_abs not in path_list_tmp2:
+ path_list_tmp2.append(path_tmp_abs)
+
+ path_list = []
+ for path_tmp in path_list_tmp2:
+ path_list.append(os.path.dirname(path_tmp))
+
+ # Check if quiet
+ select_default = env.get('CM_QUIET', '') == 'yes'
+
+ # Prepare paths to search
+ r = self.find_file_in_paths({'paths': path_list,
+ 'file_name': file_name,
+ 'select': True,
+ 'select_default': select_default,
+ 'detect_version': i.get('detect_version', False),
+ 'env_path_key': env_path_key,
+ 'env': env_copy,
+ 'hook': hook,
+ 'run_script_input': run_script_input,
+ 'recursion_spaces': recursion_spaces})
+
+ run_script_input['state'] = run_script_input_state_copy
+
+ if r['return'] > 0:
+ return r
+
+ found_files = r['found_files']
+
+ if len(found_files) == 0:
+ return {'return': 16, 'error': '{} not found'.format(file_name)}
+
+ # Finalize output
+ file_path = found_files[0]
+ found_path = os.path.dirname(file_path)
+
+ if found_path not in default_path_list:
+ env_key = '+' + default_path_env_key
+
+ paths = env.get(env_key, [])
+ if found_path not in paths:
+ paths.insert(0, found_path)
+ env[env_key] = paths
+ for extra_path in extra_paths:
+ epath = os.path.normpath(
+ os.path.join(found_path, "..", extra_path))
+ if os.path.exists(epath):
+ if extra_paths[extra_path] not in env:
+ env[extra_paths[extra_path]] = []
+ env[extra_paths[extra_path]].append(epath)
+ logging.info(
+ recursion_spaces +
+ ' # Found artifact in {}'.format(file_path))
+
+ if env_path_key != '':
+ env[env_path_key] = file_path
+
+ return {'return': 0, 'found_path': found_path,
+ 'found_file_path': file_path,
+ 'found_file_name': os.path.basename(file_path),
+ 'default_path_list': default_path_list}
+
+ ##########################################################################
+ def find_file_deep(self, i):
+ """
+ Find file name in a list of paths
+
+ Args:
+ (CM input dict):
+
+ paths (list): list of paths
+ file_name (str): filename pattern to find
+ (restrict_paths) (list): restrict found paths to these combinations
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ (found_paths) (list): paths to files when found
+
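+ Example (illustrative; the paths and file name are hypothetical,
+ and the result depends on the actual filesystem):
+
+ r = self.find_file_deep({'paths': ['/usr/local'],
+ 'file_name': 'python3',
+ 'restrict_paths': ['bin']})
+ # r['found_paths'] -> e.g. ['/usr/local/bin']
+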
+ """
+
+ paths = i['paths']
+ file_name = i['file_name']
+
+ restrict_paths = i.get('restrict_paths', [])
+
+ found_paths = []
+
+ for p in paths:
+ if os.path.isdir(p):
+ p1 = os.listdir(p)
+ for f in p1:
+ p2 = os.path.join(p, f)
+
+ if os.path.isdir(p2):
+ r = self.find_file_deep(
+ {'paths': [p2], 'file_name': file_name, 'restrict_paths': restrict_paths})
+ if r['return'] > 0:
+ return r
+
+ found_paths += r['found_paths']
+ else:
+ if f == file_name:
+ found_paths.append(p)
+ break
+
+ if len(found_paths) > 0 and len(restrict_paths) > 0:
+ filtered_found_paths = []
+
+ for p in found_paths:
+ for f in restrict_paths:
+ if f in p:
+ filtered_found_paths.append(p)
+ break
+
+ found_paths = filtered_found_paths
+
+ return {'return': 0, 'found_paths': found_paths}
+
+ ##########################################################################
+ def find_file_back(self, i):
+ """
+ Find file name backwards
+
+ Args:
+ (CM input dict):
+
+ path (str): path to start with
+ file_name (str): file name to find
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ (found_path) (str): path if found or empty
+
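+ Example (illustrative; the path is hypothetical):
+
+ r = self.find_file_back({'path': '/home/user/project/src',
+ 'file_name': 'CMakeLists.txt'})
+ # r['found_path'] is the closest directory (starting from the
+ # given path and walking up) that contains CMakeLists.txt,
+ # or '' if none is found up to the filesystem root.
+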
+ """
+
+ path = i['path']
+ file_name = i['file_name']
+
+ found_path = ''
+
+ while path != '':
+ path_to_file = os.path.join(path, file_name)
+ if os.path.isfile(path_to_file):
+ break
+
+ path2 = os.path.dirname(path)
+
+ if path2 == path:
+ path = ''
+ break
+ else:
+ path = path2
+
+ return {'return': 0, 'found_path': path}
+
+ ##########################################################################
+ def parse_version(self, i):
+ """
+ Parse version (used in post processing functions)
+
+ Args:
+ (CM input dict):
+
+ (file_name) (str): filename to get version from (tmp-ver.out by default)
+ match_text (str): RE match text string
+ group_number (int): RE group number to get version from
+ env_key (str): which env key to update
+ which_env (dict): which env to update
+ (debug) (boolean): if True, print some debug info
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ version (str): detected version
+ string (str): full file string
+
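+ Example (illustrative; assumes tmp-ver.out contains a line such
+ as "Python 3.11.5"):
+
+ r = self.parse_version({'match_text': r'Python\s*([\d.]+)',
+ 'group_number': 1,
+ 'env_key': 'CM_PYTHON_VERSION',
+ 'which_env': env})
+ # r['version'] -> '3.11.5'; env['CM_PYTHON_VERSION'] and
+ # env['CM_DETECTED_VERSION'] are set to the detected version
+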
+ """
+
+ file_name = i.get('file_name', '')
+ if file_name == '':
+ file_name = self.tmp_file_ver
+
+ match_text = i['match_text']
+ group_number = i['group_number']
+ env_key = i['env_key']
+ which_env = i['which_env']
+ debug = i.get('debug', False)
+
+ r = utils.load_txt(file_name=file_name,
+ check_if_exists=True,
+ split=True,
+ match_text=match_text,
+ fail_if_no_match='version was not detected')
+ if r['return'] > 0:
+ if r.get('string', '') != '':
+ r['error'] += ' ({})'.format(r['string'])
+ return r
+
+ string = r['string']
+
+ if r['match'].lastindex and r['match'].lastindex >= group_number:
+ version = r['match'].group(group_number)
+ else:
+ return {'return': 1, 'error': 'Invalid version detection group number. Version was not detected. Last index of match = {}. Given group number = {}'.format(
+ r['match'].lastindex, group_number)}
+
+ which_env[env_key] = version
+ # to be recorded in the cache meta
+ which_env['CM_DETECTED_VERSION'] = version
+
+ return {'return': 0, 'version': version, 'string': string}
+
+ ##########################################################################
+ def update_deps(self, i):
+ """
+ Update deps from pre/post processing
+ Args:
+ (CM input dict):
+ deps (dict): deps dict
+ update_deps (dict): key matches "names" in deps
+ Returns:
+ (CM return dict):
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ """
+
+ deps = i['deps']
+ add_deps = i['update_deps']
+ env = i.get('env', {})
+ update_deps(deps, add_deps, False, env)
+
+ return {'return': 0}
+
+ ##########################################################################
+ def update_state_from_meta(self, meta, env, state, const, const_state, deps, post_deps,
+ prehook_deps, posthook_deps, new_env_keys, new_state_keys, run_state, i):
+ """
+ Updates state and env from meta
+ Args:
+ """
+
+ r = update_state_from_meta(
+ meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps,
+ post_deps,
+ prehook_deps,
+ posthook_deps,
+ new_env_keys,
+ new_state_keys,
+ run_state,
+ i)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def get_default_path_list(self, i):
+ default_path_env_key = i.get('default_path_env_key', '')
+ os_info = i['os_info']
+ default_path_list = [] if default_path_env_key == '' else \
+ os.environ.get(
+ default_path_env_key,
+ '').split(
+ os_info['env_separator'])
+
+ return default_path_list
+
+ ############################################################
+
+ def doc(self, i):
+ """
+ Document CM script.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ parsed_artifact (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ (repos) (str): list of repositories to search for automations
+
+ (output_dir) (str): output directory (../docs by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ return utils.call_internal_module(
+ self, __file__, 'module_misc', 'doc', i)
+
+ ############################################################
+ def gui(self, i):
+ """
+ Run GUI for CM script.
+
+ Args:
+ (CM input dict):
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ artifact = i.get('artifact', '')
+ tags = ''
+ if artifact != '':
+ if ' ' in artifact:
+ tags = artifact.replace(' ', ',')
+
+ if tags == '':
+ tags = i.get('tags', '')
+
+ if 'tags' in i:
+ del (i['tags'])
+
+ i['action'] = 'run'
+ i['artifact'] = 'gui'
+ i['parsed_artifact'] = [('gui', '605cac42514a4c69')]
+ i['script'] = tags.replace(',', ' ')
+
+ return self.cmind.access(i)
+
+ ############################################################
+
+ def dockerfile(self, i):
+ """
+ Generate Dockerfile for CM script.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ parsed_artifact (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ (repos) (str): list of repositories to search for automations
+
+ (output_dir) (str): output directory (./ by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ return utils.call_internal_module(
+ self, __file__, 'module_misc', 'dockerfile', i)
+
+ ############################################################
+ def docker(self, i):
+ """
+ Run CM script in an automatically-generated container.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ (repos) (str): list of repositories to search for automations
+
+ (output_dir) (str): output directory (./ by default)
+
+ (docker) (dict): convert keys into docker_{key} strings for CM >= 2.3.8.1
+
+
+ (docker_skip_build) (bool): do not generate Dockerfiles and do not recreate Docker image (must exist)
+ (docker_noregenerate) (bool): do not generate Dockerfiles
+ (docker_norecreate) (bool): do not recreate Docker image
+
+ (docker_cfg) (str): if True, show all available basic docker configurations, otherwise pre-select one
+ (docker_cfg_uid) (str): if True, select docker configuration with this UID
+
+ (docker_path) (str): where to create or find Dockerfile
+ (docker_gh_token) (str): GitHub token for private repositories
+ (docker_save_script) (str): if !='' name of script to save docker command
+ (docker_interactive) (bool): if True, run in interactive mode
+ (docker_it) (bool): the same as `docker_interactive`
+ (docker_detached) (bool): detach Docker
+ (docker_dt) (bool) the same as `docker_detached`
+
+ (docker_base_image) (str): force base image
+ (docker_os) (str): force docker OS (default: ubuntu)
+ (docker_os_version) (str): force docker OS version (default: 22.04)
+ (docker_image_tag_extra) (str): add extra tag (default:-latest)
+
+ (docker_cm_repo) (str): force CM automation repository when building Docker (default: cm4mlops)
+ (docker_cm_repos)
+ (docker_cm_repo_flags)
+
+ (dockerfile_env)
+
+ (docker_skip_cm_sys_upgrade) (bool): if True, do not install CM sys deps
+
+ (docker_extra_sys_deps)
+
+ (fake_run_deps)
+ (docker_run_final_cmds)
+
+ (all_gpus)
+ (num_gpus)
+
+ (docker_device)
+
+ (docker_port_maps)
+
+ (docker_shm_size)
+
+ (docker_extra_run_args)
+
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ return utils.call_internal_module(
+ self, __file__, 'module_misc', 'docker', i)
+
+ ##########################################################################
+
+ def _available_variations(self, i):
+ """
+ return error with available variations
+
+ Args:
+ (CM input dict):
+
+ meta (dict): meta of the script
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ 16 if not detected
+ * (error) (str): error string if return>0
+
+ """
+
+ meta = i['meta']
+
+ list_of_variations = sorted(
+ ['_' + v for v in meta.get('variations', {}).keys()])
+
+ return {'return': 1, 'error': 'python package variation is not defined in "{}". Available: {}'.format(
+ meta['alias'], ' '.join(list_of_variations))}
+
+ ############################################################
+ def prepare(self, i):
+ """
+ Run CM script with --fake_run only to resolve deps
+ """
+
+ i['fake_run'] = True
+
+ return self.run(i)
+
+ ############################################################
+ # Reusable blocks for some scripts
+ def clean_some_tmp_files(self, i):
+ """
+ Clean tmp files
+ """
+
+ env = i.get('env', {})
+
+ cur_work_dir = env.get('CM_TMP_CURRENT_SCRIPT_WORK_PATH', '')
+ if cur_work_dir != '' and os.path.isdir(cur_work_dir):
+ for x in ['tmp-run.bat', 'tmp-state.json']:
+ xx = os.path.join(cur_work_dir, x)
+ if os.path.isfile(xx):
+ os.remove(xx)
+
+ return {'return': 0}
+
+
+##############################################################################
+def find_cached_script(i):
+ """
+ Internal automation function: find cached script
+
+ Args:
+ (CM input dict):
+
+ keys prepared by the script automation, including script_tags,
+ variation_tags, version / version_min / version_max, env, state
+ and cache-related flags (see the code below)
+
+ Returns:
+ (CM return dict):
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ cached_tags (list): tags used to search the cache
+ search_tags (str): assembled cache search string
+ found_cached_scripts (list): matching cache entries
+ """
+
+ import copy
+
+ recursion_spaces = i['recursion_spaces']
+ extra_recursion_spaces = i['extra_recursion_spaces']
+ script_tags = i['script_tags']
+ cached_tags = []
+ customize_code = i.get('customize_code')
+ customize_common_input = i.get('customize_common_input', {})
+ found_script_tags = i['found_script_tags']
+ variation_tags = i['variation_tags']
+ variation_tags_string = i['variation_tags_string']
+ explicit_variation_tags = i['explicit_variation_tags']
+ version = i['version']
+ version_min = i['version_min']
+ version_max = i['version_max']
+ extra_cache_tags = i['extra_cache_tags']
+ add_deps_recursive = i['add_deps_recursive']
+ new_cache_entry = i['new_cache_entry']
+ meta = i['meta']
+ env = i['env']
+ state = i['state']
+ const = i['const']
+ const_state = i['const_state']
+ self_obj = i['self']
+ skip_remembered_selections = i['skip_remembered_selections']
+ remembered_selections = i['remembered_selections']
+ quiet = i['quiet']
+ show_time = i.get('show_time', False)
+ search_tags = ''
+
+ verbose = i.get('verbose', False)
+ if not verbose:
+ verbose = i.get('v', False)
+
+ found_cached_scripts = []
+
+ logging.debug(
+ recursion_spaces +
+ ' - Checking if script execution is already cached ...')
+
+ # Create a search query to check whether we already ran this script with the same or similar input
+ # It will be gradually enhanced with more "knowledge" ...
+ if len(script_tags) > 0:
+ for x in script_tags:
+ if x not in cached_tags:
+ cached_tags.append(x)
+
+ if len(found_script_tags) > 0:
+ for x in found_script_tags:
+ if x not in cached_tags:
+ cached_tags.append(x)
+
+ explicit_cached_tags = copy.deepcopy(cached_tags)
+
+ if len(explicit_variation_tags) > 0:
+ explicit_variation_tags_string = ''
+
+ for t in explicit_variation_tags:
+ if explicit_variation_tags_string != '':
+ explicit_variation_tags_string += ','
+ if t.startswith("-"):
+ x = "-_" + t[1:]
+ else:
+ x = '_' + t
+ explicit_variation_tags_string += x
+
+ if x not in explicit_cached_tags:
+ explicit_cached_tags.append(x)
+
+ logging.debug(
+ recursion_spaces +
+ ' - Prepared explicit variations: {}'.format(explicit_variation_tags_string))
+
+ if len(variation_tags) > 0:
+ variation_tags_string = ''
+
+ for t in variation_tags:
+ if variation_tags_string != '':
+ variation_tags_string += ','
+ if t.startswith("-"):
+ x = "-_" + t[1:]
+ else:
+ x = '_' + t
+ variation_tags_string += x
+
+ if x not in cached_tags:
+ cached_tags.append(x)
+
+ logging.debug(
+ recursion_spaces +
+ ' - Prepared variations: {}'.format(variation_tags_string))
+
+ # Add version
+ if version != '':
+ if 'version-' + version not in cached_tags:
+ cached_tags.append('version-' + version)
+ explicit_cached_tags.append('version-' + version)
+
+ # Add extra cache tags (such as "virtual" for python)
+ if len(extra_cache_tags) > 0:
+ for t in extra_cache_tags:
+ if t not in cached_tags:
+ cached_tags.append(t)
+ explicit_cached_tags.append(t)
+
+ # Add tags from deps (will be also duplicated when creating new cache
+ # entry)
+ extra_cache_tags_from_env = meta.get('extra_cache_tags_from_env', [])
+ for extra_cache_tags_entry in extra_cache_tags_from_env:
+ key = extra_cache_tags_entry['env']
+ prefix = extra_cache_tags_entry.get('prefix', '')
+
+ v = env.get(key, '').strip()
+ if v != '':
+ for t in v.split(','):
+ x = 'deps-' + prefix + t
+ if x not in cached_tags:
+ cached_tags.append(x)
+ explicit_cached_tags.append(x)
+
+ # Check if already cached
+ if not new_cache_entry:
+ search_tags = '-tmp'
+ if len(cached_tags) > 0:
+ search_tags += ',' + ','.join(explicit_cached_tags)
+
+ logging.debug(
+ recursion_spaces +
+ ' - Searching for cached script outputs with the following tags: {}'.format(search_tags))
+
+ r = self_obj.cmind.access({'action': 'find',
+ 'automation': self_obj.meta['deps']['cache'],
+ 'tags': search_tags})
+ if r['return'] > 0:
+ return r
+
+ found_cached_scripts = r['list']
+
+ # Check if selection is remembered
+ if not skip_remembered_selections and len(found_cached_scripts) > 1:
+ # Need to add extra cached tags here (since recorded later)
+ for selection in remembered_selections:
+ if selection['type'] == 'cache' and set(
+ selection['tags'].split(',')) == set(search_tags.split(',')):
+ tmp_version_in_cached_script = selection['cached_script'].meta.get(
+ 'version', '')
+
+ skip_cached_script = check_versions(
+ self_obj.cmind, tmp_version_in_cached_script, version_min, version_max)
+
+ if skip_cached_script:
+ return {'return': 2, 'error': 'The version of the previously remembered selection for a given script ({}) mismatches the newly requested one'.format(
+ tmp_version_in_cached_script)}
+ else:
+ found_cached_scripts = [selection['cached_script']]
+ logging.debug(
+ recursion_spaces +
+ ' - Found remembered selection with tags "{}"!'.format(search_tags))
+ break
+
+ if len(found_cached_scripts) > 0:
+ selection = 0
+
+ # Check version ranges ...
+ new_found_cached_scripts = []
+
+ for cached_script in found_cached_scripts:
+ skip_cached_script = False
+ dependent_cached_path = cached_script.meta.get(
+ 'dependent_cached_path', '')
+ if dependent_cached_path:
+ if not os.path.exists(dependent_cached_path):
+ # TODO Need to restrict the below check to within container
+ # env
+ i['tmp_dep_cached_path'] = dependent_cached_path
+ r = utils.call_internal_module(
+ self_obj, __file__, 'module_misc', 'get_container_path_script', i)
+ if not os.path.exists(r['value_env']):
+ # Need to rm this cache entry
+ skip_cached_script = True
+ continue
+
+ os_info = self_obj.os_info
+
+ # Bat extension for this host OS
+ bat_ext = os_info['bat_ext']
+ script_path = i['found_script_path']
+ detected_version = None
+
+ if os.path.exists(os.path.join(script_path,
+ f"validate_cache{bat_ext}")):
+ run_script_input = {
+ 'path': script_path,
+ 'bat_ext': bat_ext,
+ 'os_info': os_info,
+ 'recursion_spaces': recursion_spaces,
+ 'tmp_file_run': self_obj.tmp_file_run,
+ 'self': self_obj,
+ 'meta': meta,
+ 'customize_code': customize_code,
+ 'customize_common_input': customize_common_input
+ }
+
+ deps = meta.get('deps')
+ if deps:
+ r = self_obj._call_run_deps(deps, self_obj.local_env_keys, meta.get('local_env_keys', []), env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, True, '', False, show_time, extra_recursion_spaces, {})
+ if r['return'] > 0:
+ return r
+
+ # Check if pre-process and detect
+ # if 'preprocess' in dir(customize_code):
+
+ # logging.debug(recursion_spaces + ' - Running preprocess ...')
+
+ # ii = copy.deepcopy(customize_common_input)
+ # ii['env'] = env
+ # ii['meta'] = meta
+ # # may need to detect versions in multiple paths
+ # ii['run_script_input'] = run_script_input
+
+ # r = customize_code.preprocess(ii)
+ # if r['return'] > 0:
+ # return r
+
+ ii = {
+ 'run_script_input': run_script_input,
+ 'env': env,
+ 'script_name': 'validate_cache',
+ 'detect_version': True
+ }
+ r = self_obj.run_native_script(ii)
+ # print(r)
+ if r['return'] > 0:
+ # return r
+ continue
+ if r.get('version'):
+ detected_version = r['version']
+
+ if not skip_cached_script:
+ cached_script_version = cached_script.meta.get('version', '')
+ if cached_script_version and detected_version and cached_script_version != detected_version:
+ continue
+
+ skip_cached_script = check_versions(
+ self_obj.cmind, cached_script_version, version_min, version_max)
+
+ if not skip_cached_script:
+ new_found_cached_scripts.append(cached_script)
+
+ found_cached_scripts = new_found_cached_scripts
+
+ return {'return': 0, 'cached_tags': cached_tags,
+ 'search_tags': search_tags, 'found_cached_scripts': found_cached_scripts}
+
+
+##############################################################################
+def enable_or_skip_script(meta, env):
+ """
+ Internal: enable a dependency based on enable_if_env and skip_if_env meta information
+ (AND function)
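+
+ Illustrative example (hypothetical meta/env):
+ meta = {'CM_HOST_OS_TYPE': ['linux'], 'CM_TMP_FLAG': ['yes']}
+ env = {'CM_HOST_OS_TYPE': 'linux', 'CM_TMP_FLAG': 'on'}
+ -> True: every key matches ('on' counts as a true-like value);
+ if any key mismatched, the result would be False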
+ """
+
+ if not isinstance(meta, dict):
+ logging.info(
+ "The meta entry is not a dictionary for skip/enable if_env: %s",
+ meta)
+
+ for key in meta:
+ meta_key = [str(v).lower() for v in meta[key]]
+ if key in env:
+ value = str(env[key]).lower().strip()
+ if set(meta_key) & set(["yes", "on", "true", "1"]):
+ # Any value other than a false-like one counts as set
+ if value not in ["no", "off", "false", "0", ""]:
+ continue
+ elif set(meta_key) & set(["no", "off", "false", "0"]):
+ if value in ["no", "off", "false", "0", ""]:
+ continue
+ elif value in meta_key:
+ continue
+ else:
+ if set(meta_key) & set(["no", "off", "false", "0", ""]):
+ # If key is missing in env, and if the expected value is False,
+ # consider it a match
+ continue
+
+ return False
+
+ return True
+
+##############################################################################
+
+
+def any_enable_or_skip_script(meta, env):
+ """
+ Internal: enable a dependency based on enable_if_env and skip_if_env meta information
+ (OR function)
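+
+ Illustrative example (hypothetical meta/env):
+ meta = {'CM_A': ['yes'], 'CM_B': ['v1']}
+ env = {'CM_B': 'v1'}
+ -> True: CM_B matches and a single match is enough (OR)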
+ """
+ for key in meta:
+ found = False
+ if key in env:
+ value = str(env[key]).lower().strip()
+
+ meta_key = [str(v).lower() for v in meta[key]]
+
+ if set(meta_key) & set(["yes", "on", "true", "1"]):
+ if value not in ["no", "off", "false", "0", ""]:
+ found = True
+ elif set(meta_key) & set(["no", "off", "false", "0", ""]):
+ if value in ["no", "off", "false", "0", ""]:
+ found = True
+ elif value in meta_key:
+ found = True
+
+ # If found any match from the list (OR), return
+ if found:
+ return True
+
+ return False
+
+##########################################################################
+
+
+def _update_env(env, key=None, value=None):
+
+ if key is None or value is None:
+ return {
+ 'return': 1, 'error': 'None value not expected in key and value arguments in _update_env.'}
+ if not isinstance(key, str):
+ return {'return': 1, 'error': 'String value expected inside key argument.'}
+
+ env[key] = value
+
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+
+##########################################################################
+def update_env_with_values(env, fail_on_not_found=False, extra_env=None):
+ """
+ Update any env key used as part of values in meta
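+
+ Illustrative example (hypothetical keys):
+ env = {'CM_VER': '3.11', 'CM_TAG': 'python-<<<CM_VER>>>'}
+ update_env_with_values(env)
+ # env['CM_TAG'] -> 'python-3.11'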
+ """
+ import re
+
+ extra_env = extra_env or {} # Default to an empty dictionary if not provided
+
+ for key, value in env.items():
+ # Check for keys starting with "+" and ensure their values are lists
+ if key.startswith("+") and not isinstance(value, list):
+ return {'return': 1, 'error': f'List value expected for {key} in env'}
+
+ # Handle boolean values directly
+ if isinstance(value, bool):
+ env[key] = value
+ continue
+
+ # Search for placeholders like <<<...>>>
+ placeholders = re.findall(r'<<<(.*?)>>>', str(value))
+
+ # No placeholders found
+ if not placeholders:
+ # Special handling for CM_GIT_URL
+ if key == 'CM_GIT_URL' and env.get('CM_GIT_AUTH', "no") == "yes":
+ if env.get('CM_GH_TOKEN', '') and '@' not in env['CM_GIT_URL']:
+ params = {"token": env['CM_GH_TOKEN']}
+ value = get_git_url("token", value, params)
+ elif 'CM_GIT_SSH' in env:
+ value = get_git_url("ssh", value)
+ env[key] = value
+ continue
+
+ # Process each placeholder
+ for placeholder in placeholders:
+ if placeholder not in env and placeholder not in extra_env and fail_on_not_found:
+ return {'return': 1,
+ 'error': f'Variable {placeholder} is not in env'}
+
+ # Determine the source of the value
+ found_env = env if placeholder in env else extra_env if placeholder in extra_env else None
+ if found_env:
+ if isinstance(value, str):
+ value = value.replace(
+ f"<<<{placeholder}>>>", str(
+ found_env[placeholder]))
+ elif isinstance(value, list):
+ value = [
+ v.replace(
+ f"<<<{placeholder}>>>", str(
+ found_env[placeholder])) if isinstance(
+ v, str) else v for v in value]
+
+ env[key] = value
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def check_version_constraints(i):
+ """
+ Internal: check version constraints and skip the script artifact if constraints are not met
+ """
+
+ detected_version = i['detected_version']
+
+ version = i.get('version', '')
+ version_min = i.get('version_min', '')
+ version_max = i.get('version_max', '')
+
+ cmind = i['cmind']
+
+ skip = False
+
+ if version != '' and version != detected_version:
+ skip = True
+
+ if not skip and detected_version != '' and version_min != '':
+ ry = cmind.access({'action': 'compare_versions',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'version1': detected_version,
+ 'version2': version_min})
+ if ry['return'] > 0:
+ return ry
+
+ if ry['comparison'] < 0:
+ skip = True
+
+ if not skip and detected_version != '' and version_max != '':
+ ry = cmind.access({'action': 'compare_versions',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'version1': detected_version,
+ 'version2': version_max})
+ if ry['return'] > 0:
+ return ry
+
+ if ry['comparison'] > 0:
+ skip = True
+
+ return {'return': 0, 'skip': skip}
+
+
+##############################################################################
+def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"):
+ """
+ Internal: prepare and run script with postprocessing that can be reused for version check
+ """
+
+ path = i['path']
+ bat_ext = i['bat_ext']
+ os_info = i['os_info']
+ customize_code = i.get('customize_code', None)
+ customize_common_input = i.get('customize_common_input', {})
+
+ env = i.get('env', {})
+ const = i.get('const', {})
+ state = i.get('state', {})
+ const_state = i.get('const_state', {})
+ run_state = i.get('run_state', {})
+ verbose = i.get('verbose', False)
+ if not verbose:
+ verbose = i.get('v', False)
+
+ show_time = i.get('time', False)
+
+ recursion = i.get('recursion', False)
+ found_script_tags = i.get('found_script_tags', [])
+ debug_script_tags = i.get('debug_script_tags', '')
+
+ meta = i.get('meta', {})
+
+ reuse_cached = i.get('reused_cached', False)
+ recursion_spaces = i.get('recursion_spaces', '')
+
+ tmp_file_run_state = i.get('tmp_file_run_state', '')
+ tmp_file_run_env = i.get('tmp_file_run_env', '')
+ tmp_file_state = i.get('tmp_file_state', '')
+ tmp_file_run = i['tmp_file_run']
+ local_env_keys = i.get('local_env_keys', [])
+ local_env_keys_from_meta = i.get('local_env_keys_from_meta', [])
+ posthook_deps = i.get('posthook_deps', [])
+ add_deps_recursive = i.get('add_deps_recursive', {})
+ recursion_spaces = i['recursion_spaces']
+ remembered_selections = i.get('remembered_selections', [])
+ variation_tags_string = i.get('variation_tags_string', '')
+ found_cached = i.get('found_cached', False)
+ script_automation = i['self']
+
+ repro_prefix = i.get('repro_prefix', '')
+
+ # Prepare script name
+ check_if_run_script_exists = False
+ script_name = i.get('script_name', '').strip()
+ if script_name == '':
+ script_name = meta.get('script_name', '').strip()
+ if script_name != '':
+ # Script name was added by user - we need to check that it really
+ # exists (on Linux or Windows)
+ check_if_run_script_exists = True
+ if script_name == '':
+ # Here is the default script name - if it doesn't exist, we skip it.
+ # However, if it's explicitly specified, we check it and report
+ # if it's missing ...
+ script_name = 'run'
+
+ if bat_ext == '.sh':
+ run_script = get_script_name(env, path, script_name)
+ else:
+ run_script = script_name + bat_ext
+
+ path_to_run_script = os.path.join(path, run_script)
+
+ if check_if_run_script_exists and not os.path.isfile(path_to_run_script):
+ return {
+ 'return': 16, 'error': 'script {} not found - please add one'.format(path_to_run_script)}
+
+ # Update env and state with const
+ utils.merge_dicts({'dict1': env,
+ 'dict2': const,
+ 'append_lists': True,
+ 'append_unique': True})
+ utils.merge_dicts({'dict1': state, 'dict2': const_state,
+ 'append_lists': True, 'append_unique': True})
+
+ # Update env with the current path
+ if os_info['platform'] == 'windows' and ' ' in path:
+ path = '"' + path + '"'
+
+ cur_dir = os.getcwd()
+
+ r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path)
+ if r['return'] > 0:
+ return r
+
+ r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir)
+ if r['return'] > 0:
+ return r
+
+ # Record state
+ if tmp_file_state != '':
+ r = utils.save_json(file_name=tmp_file_state, meta=state)
+ if r['return'] > 0:
+ return r
+
+ rr = {'return': 0}
+
+ # If batch file exists, run it with current env and state
+ if os.path.isfile(path_to_run_script) and not reuse_cached:
+ if tmp_file_run_state != '' and os.path.isfile(tmp_file_run_state):
+ os.remove(tmp_file_run_state)
+ if tmp_file_run_env != '' and os.path.isfile(tmp_file_run_env):
+ os.remove(tmp_file_run_env)
+
+ run_script = tmp_file_run + bat_ext
+ run_script_without_cm = tmp_file_run + '-without-cm' + bat_ext
+
+ logging.debug(
+ recursion_spaces +
+ ' - Running native script "{}" via temporary script "{}" in "{}" ...'.format(
+ path_to_run_script,
+ run_script,
+ cur_dir))
+ if not run_state.get('tmp_silent', False):
+ logging.info(recursion_spaces + ' ! cd {}'.format(cur_dir))
+ logging.info(
+ recursion_spaces +
+ ' ! call {} from {}'.format(
+ path_to_run_script,
+ run_script))
+
+ # Prepare env variables
+ import copy
+ script = copy.deepcopy(os_info['start_script'])
+
+ # Check if script_prefix in the state from other components
+ script_prefix = state.get('script_prefix', [])
+ if len(script_prefix) > 0:
+ # script = script_prefix + ['\n'] + script
+ script += script_prefix + ['\n']
+
+ script += convert_env_to_script(env, os_info)
+
+# # Check if run bash/cmd before running the command (for debugging)
+# if debug_script_tags !='' and all(item in found_script_tags for item in debug_script_tags.split(',')):
+# # Copy original run script to be able to run it outside ...
+# x=['cmd', '.', '','.bat'] if os_info['platform'] == 'windows' else ['bash', ' ""', '"','.sh']
+#
+# script.append('\n')
+# script.append('echo{}\n'.format(x[1]))
+# script.append('echo {}Running debug shell. Type exit to resume script execution ...{}\n'.format(x[2],x[2]))
+# script.append('echo{}\n'.format(x[1]))
+# script.append('\n')
+# script.append(x[0])
+
+ # Append batch file to the tmp script
+ script.append('\n')
+ script.append(
+ os_info['run_bat'].replace(
+ '${bat_file}',
+ '"' +
+ path_to_run_script +
+ '"') +
+ '\n')
+
+ # Prepare and run script
+ r = record_script(run_script, script, os_info)
+ if r['return'] > 0:
+ return r
+
+ # Save file to run without CM
+ if debug_script_tags != '' and all(
+ item in found_script_tags for item in debug_script_tags.split(',')):
+
+ import shutil
+ shutil.copy(run_script, run_script_without_cm)
+
+ logging.info(
+ '================================================================================')
+ logging.info(
+ 'Debug script to run without CM was recorded: {}'.format(run_script_without_cm))
+ logging.info(
+ '================================================================================')
+
+ # Run final command
+ cmd = os_info['run_local_bat_from_python'].replace(
+ '${bat_file}', run_script)
+
+ rc = os.system(cmd)
+
+ if rc > 0 and not i.get('ignore_script_error', False):
+ # Check if print files when error
+ print_files = meta.get('print_files_if_script_error', [])
+
+ if len(print_files) > 0:
+ for pr in print_files:
+ if os.path.isfile(pr):
+ r = utils.load_txt(file_name=pr)
+ if r['return'] == 0:
+ logging.info(
+ "========================================================")
+ logging.info("Print file {}:".format(pr))
+ logging.info("")
+ logging.info(r['string'])
+ logging.info("")
+
+ # Check where to report errors and failures
+ repo_to_report = run_state.get(
+ 'script_entry_repo_to_report_errors', '')
+
+ if repo_to_report == '':
+ script_repo_alias = run_state.get('script_repo_alias', '')
+ script_repo_git = run_state.get('script_repo_git', False)
+
+ if script_repo_git and script_repo_alias != '':
+ repo_to_report = 'https://github.com/' + \
+ script_repo_alias.replace('@', '/') + '/issues'
+
+ if repo_to_report == '':
+ repo_to_report = 'https://github.com/mlcommons/cm4mlops/issues'
+
+ note = '''
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Note that this is often a portability issue of a third-party tool or a native script
+wrapped and unified by this CM script (automation recipe). Please re-run
+this script with the --repro flag and report this issue with the original
+command line, the cm-repro directory and the full log here:
+
+{}
+
+The CM concept is to collaboratively fix such issues inside portable CM scripts
+to make existing tools and native scripts more portable, interoperable
+and deterministic. Thank you'''.format(repo_to_report)
+
+ rr = {
+ 'return': 2,
+ 'error': 'Portable CM script failed (name = {}, return code = {})\n\n{}'.format(
+ meta['alias'],
+ rc,
+ note)}
+
+ if repro_prefix != '':
+ dump_repro(repro_prefix, rr, run_state)
+
+ return rr
+
+ # Load updated state if exists
+ if tmp_file_run_state != '' and os.path.isfile(tmp_file_run_state):
+ r = utils.load_json(file_name=tmp_file_run_state)
+ if r['return'] > 0:
+ return r
+
+ updated_state = r['meta']
+
+ utils.merge_dicts({'dict1': state,
+ 'dict2': updated_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ # Load updated env if exists
+ if tmp_file_run_env != '' and os.path.isfile(tmp_file_run_env):
+ r = utils.load_txt(file_name=tmp_file_run_env)
+ if r['return'] > 0:
+ return r
+
+ r = utils.convert_env_to_dict(r['string'])
+ if r['return'] > 0:
+ return r
+
+ updated_env = r['dict']
+
+ utils.merge_dicts({'dict1': env,
+ 'dict2': updated_env,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ if postprocess != '' and customize_code is not None and postprocess in dir(
+ customize_code):
+ if not run_state.get('tmp_silent', False):
+ logging.info(
+ recursion_spaces +
+ ' ! call "{}" from {}'.format(
+ postprocess,
+ customize_code.__file__))
+
+ if len(posthook_deps) > 0 and (postprocess == "postprocess"):
+ r = script_automation._call_run_deps(posthook_deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state,
+ add_deps_recursive, recursion_spaces, remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, ' ', run_state)
+ if r['return'] > 0:
+ return r
+
+ if (postprocess == "postprocess") and customize_code is not None and 'postprocess' in dir(
+ customize_code):
+ rr = run_postprocess(customize_code, customize_common_input, recursion_spaces, env, state, const,
+ const_state, meta, verbose, i) # i as run_script_input
+ elif (postprocess == "detect_version") and customize_code is not None and 'detect_version' in dir(customize_code):
+ rr = run_detect_version(customize_code, customize_common_input, recursion_spaces, env, state, const,
+ const_state, meta, verbose)
+
+ return rr
+
+##############################################################################
+
+
+def run_detect_version(customize_code, customize_common_input,
+ recursion_spaces, env, state, const, const_state, meta, verbose=False):
+
+ if customize_code is not None and 'detect_version' in dir(customize_code):
+ import copy
+
+ logging.debug(recursion_spaces + ' - Running detect_version ...')
+
+ # Update env and state with const
+ utils.merge_dicts({'dict1': env, 'dict2': const,
+ 'append_lists': True, 'append_unique': True})
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ ii = copy.deepcopy(customize_common_input)
+ ii['env'] = env
+ ii['state'] = state
+ ii['meta'] = meta
+
+ r = customize_code.detect_version(ii)
+ return r
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def run_postprocess(customize_code, customize_common_input, recursion_spaces,
+ env, state, const, const_state, meta, verbose=False, run_script_input=None):
+
+ if customize_code is not None and 'postprocess' in dir(customize_code):
+ import copy
+
+ logging.debug(recursion_spaces + ' - Running postprocess ...')
+
+ # Update env and state with const
+ utils.merge_dicts({'dict1': env, 'dict2': const,
+ 'append_lists': True, 'append_unique': True})
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ ii = copy.deepcopy(customize_common_input)
+ ii['env'] = env
+ ii['state'] = state
+ ii['meta'] = meta
+
+ if run_script_input is not None:
+ ii['run_script_input'] = run_script_input
+
+ r = customize_code.postprocess(ii)
+ return r
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def get_script_name(env, path, script_name='run'):
+ """
+ Internal: find the most appropriate run script name for the detected OS
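+
+ Illustrative example: with CM_HOST_OS_FLAVOR=ubuntu,
+ CM_HOST_OS_VERSION=22.04 and CM_HOST_PLATFORM_FLAVOR=x86_64,
+ the candidates are tried in this order:
+ run-ubuntu-22.04-x86_64.sh, run-ubuntu-x86_64.sh,
+ run-ubuntu-22.04.sh, run-ubuntu.sh, run-x86_64.sh, run.sh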
+ """
+
+ from os.path import exists
+
+ tmp_suff1 = env.get('CM_HOST_OS_FLAVOR', '')
+ tmp_suff2 = env.get('CM_HOST_OS_VERSION', '')
+ tmp_suff3 = env.get('CM_HOST_PLATFORM_FLAVOR', '')
+
+ if exists(os.path.join(path, script_name + '-' + tmp_suff1 +
+ '-' + tmp_suff2 + '-' + tmp_suff3 + '.sh')):
+ return script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '-' + tmp_suff3 + '.sh'
+ elif exists(os.path.join(path, script_name + '-' + tmp_suff1 + '-' + tmp_suff3 + '.sh')):
+ return script_name + '-' + tmp_suff1 + '-' + tmp_suff3 + '.sh'
+ elif exists(os.path.join(path, script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '.sh')):
+ return script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '.sh'
+ elif exists(os.path.join(path, script_name + '-' + tmp_suff1 + '.sh')):
+ return script_name + '-' + tmp_suff1 + '.sh'
+ elif exists(os.path.join(path, script_name + '-' + tmp_suff3 + '.sh')):
+ return script_name + '-' + tmp_suff3 + '.sh'
+ else:
+ return script_name + '.sh'
+
+##############################################################################
+
+
+def update_env_keys(env, env_key_mappings):
+ """
+ Internal: convert env keys as per the given mapping
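+
+ Illustrative example (hypothetical keys): with
+ env_key_mappings = {'CM_PY_': 'CM_PYTHON_'}, the key CM_PY_VERSION
+ is copied to CM_PYTHON_VERSION (the original key is kept, since the
+ deletion below is commented out)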
+ """
+
+ for key_prefix in env_key_mappings:
+ for key in list(env):
+ if key.startswith(key_prefix):
+ new_key = key.replace(key_prefix, env_key_mappings[key_prefix])
+ env[new_key] = env[key]
+ # del(env[key])
+
+##############################################################################
+
+
+def convert_env_to_script(env, os_info, start_script=None):
+ """
+ Internal: Convert env to script for a given platform.
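+
+ Illustrative example (assuming a Linux-like os_info where set_env is
+ 'export ${key}="${value}"', env_var is '${env_var}' and
+ env_separator is ':'):
+
+ convert_env_to_script({'CM_X': 'y', '+PATH': ['/opt/bin']}, os_info)
+ # -> ['export PATH="/opt/bin:${PATH}"', 'export CM_X="y"']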
+ """
+ import copy
+
+ # Initialize script with a deep copy of the start_script or an empty list
+ script = copy.deepcopy(start_script) if start_script else []
+
+ # Determine if the platform is Windows
+ is_windows = os_info['platform'] == 'windows'
+
+ for k in sorted(env):
+ env_value = env[k]
+
+ # Handle Windows-specific value processing
+ if is_windows:
+ if not isinstance(env_value, list):
+ env_value = [env_value]
+
+ processed_values = []
+ for v in env_value:
+ v_str = str(v)
+ if '"' not in v_str:
+ # Add quotes if special characters are present
+ if any(char in v_str for char in ['|', '&', '>', '<']):
+ v_str = f'"{v_str}"'
+ processed_values.append(v_str)
+
+ env_value = processed_values if isinstance(
+ env[k], list) else processed_values[0]
+
+ # Process special keys
+ key = k
+ if k.startswith('+'):
+ key = k[1:]
+ env_separator = os_info.get('env_separator', ';')
+
+ # Custom separator if key starts with a non-alphanumeric character
+ if not key[0].isalnum():
+ env_separator = key[0]
+ key = key[1:]
+
+ # Append the existing environment variable to the new value
+ env_value = f"{env_separator.join(env_value)}{env_separator}{os_info['env_var'].replace('env_var', key)}"
+
+ # Replace placeholders in the platform-specific environment command
+ env_command = os_info['set_env'].replace(
+ '${key}', key).replace(
+ '${value}', str(env_value))
+ script.append(env_command)
+
+ return script
+
+##############################################################################
+
+
+def record_script(run_script, script, os_info):
+ """
+ Internal: record script and chmod 755 on Linux
+ """
+
+ final_script = '\n'.join(script)
+
+ if not final_script.endswith('\n'):
+ final_script += '\n'
+
+ r = utils.save_txt(file_name=run_script, string=final_script)
+ if r['return'] > 0:
+ return r
+
+ if os_info.get('set_exec_file', '') != '':
+ cmd = os_info['set_exec_file'].replace('${file_name}', run_script)
+ rc = os.system(cmd)
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def clean_tmp_files(clean_files, recursion_spaces):
+ """
+ Internal: clean tmp files
+ """
+
+# logging.info('')
+# logging.info(recursion_spaces+' - cleaning files {} ...'.format(clean_files))
+
+ for tmp_file in clean_files:
+ if os.path.isfile(tmp_file):
+ os.remove(tmp_file)
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def update_dynamic_env_values(mydict, env):
+ """
+ Internal: update dynamic values (identified by <<<...>>>) in the given dict with the corresponding values from the env dictionary, if set
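+
+ Illustrative example (hypothetical keys):
+ d = {'tags': 'python,version.<<<CM_VER>>>'}
+ update_dynamic_env_values(d, {'CM_VER': '3.11'})
+ # d['tags'] -> 'python,version.3.11'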
+ """
+ import re
+ # Regular expression to match <<<...>>> placeholders
+ pattern = re.compile(r'<<<(.*?)>>>')
+
+ def replace_variables(value):
+ # Replace each <<<...>>> placeholder in a string with the
+ # corresponding value from env
+ if isinstance(value, str): # Only process if the value is a string
+ matches = pattern.findall(value)
+ for match in matches:
+ if match in env: # Replace only if the variable is in env
+ value = value.replace(f'<<<{match}>>>', str(env[match]))
+ return value
+
+ # Recursively update the dictionary
+ for key, val in mydict.items():
+ if isinstance(val, dict):
+ # If the value is a dictionary, recurse into it
+ update_dynamic_env_values(val, env)
+ else:
+ # Replace variables in the current value
+ mydict[key] = replace_variables(val)
+
+ return
+
+
+##############################################################################
+def update_dep_info(dep, new_info):
+ """
+ Internal: Add additional info to a dependency.
+ """
+ for info, value in new_info.items():
+
+ if info == "tags":
+ # Process tags
+ existing_tags = dep.get('tags', '').split(",")
+ new_tags = value.split(",")
+ # Filter and combine unique tags
+ filtered_new_tags = [tag for tag in new_tags if "<<<" not in tag]
+ combined_tags = existing_tags + \
+ list(set(filtered_new_tags) - set(existing_tags))
+ dep['tags'] = ",".join(combined_tags)
+
+ elif "enable_if_" in info or "skip_if_" in info:
+ # Skip special cases meant for conditions
+ continue
+
+ elif isinstance(value, dict):
+ # Merge dictionaries
+ dep.setdefault(info, {})
+ if isinstance(dep[info], dict):
+ utils.merge_dicts({
+ 'dict1': dep[info],
+ 'dict2': value,
+ 'append_lists': True,
+ 'append_unique': True
+ })
+ # Optional: Throw an error if types are mismatched
+ # else:
+ # raise ValueError(f"Cannot merge non-dict type into dict for key '{info}'")
+
+ elif isinstance(value, list):
+ # Merge lists
+ dep.setdefault(info, [])
+ if isinstance(dep[info], list):
+ dep[info].extend(value)
+ # Optional: Throw an error if types are mismatched
+ # else:
+ # raise ValueError(f"Cannot append non-list type into list for key '{info}'")
+
+ else:
+ # Overwrite or set other types of values
+ dep[info] = value
+
+
+##############################################################################
+
+def update_deps(deps, add_deps, fail_error=False, env={}):
+ """
+ Internal: add deps tags, version etc. by name
+ """
+ # deps_info_to_add = [ "version", "version_min", "version_max",
+ # "version_max_usable", "path", "tags", .... ]
+ new_deps_info = {}
+ for new_dep_name in add_deps:
+ if is_dep_tobe_skipped(add_deps[new_dep_name], env):
+ continue
+ dep_found = False
+ for dep in deps:
+ names = dep.get('names', [])
+ if new_dep_name in names:
+ update_dynamic_env_values(add_deps[new_dep_name], env)
+ update_dep_info(dep, add_deps[new_dep_name])
+ dep_found = True
+ if not dep_found and fail_error:
+ return {'return': 1, 'error': new_dep_name +
+ ' is not one of the dependencies'}
+
+ return {'return': 0}
+
+
+##############################################################################
+def append_deps(deps, new_deps):
+ """
+ Internal: add deps from meta
+ """
+
+ for new_dep in new_deps:
+ existing = False
+ new_dep_names = new_dep.get('names', [])
+ if len(new_dep_names) > 0:
+ for i in range(len(deps)):
+ dep = deps[i]
+ dep_names = dep.get('names', [])
+ if len(dep_names) > 0:
+ if set(new_dep_names) == set(dep_names):
+ deps[i] = new_dep
+ existing = True
+ break
+ else: # when no name, check for tags
+ new_dep_tags = new_dep.get('tags')
+ new_dep_tags_list = new_dep_tags.split(",")
+ for i in range(len(deps)):
+ dep = deps[i]
+ dep_tags_list = dep.get('tags').split(",")
+ if set(new_dep_tags_list) == set(dep_tags_list):
+ deps[i] = new_dep
+ existing = True
+ break
+
+ if not existing:
+ deps.append(new_dep)
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def is_dep_tobe_skipped(d, env):
+ """
+ Internal: check if this dependency is to be skipped
+ """
+ if d.get('skip_if_fake_run', False) and env.get(
+ 'CM_TMP_FAKE_RUN', '') == 'yes':
+ return True
+
+ if "enable_if_env" in d:
+ if not enable_or_skip_script(d["enable_if_env"], env):
+ return True
+
+ if "enable_if_any_env" in d:
+ if not any_enable_or_skip_script(d["enable_if_any_env"], env):
+ return True
+
+ if "skip_if_env" in d:
+ if enable_or_skip_script(d["skip_if_env"], env):
+ return True
+
+ if "skip_if_any_env" in d:
+ if any_enable_or_skip_script(d["skip_if_any_env"], env):
+ return True
+
+ return False
+
+##############################################################################
+
+
+def update_deps_from_input(deps, post_deps, prehook_deps, posthook_deps, i):
+ """
+ Internal: update deps from input
+ """
+ add_deps_info_from_input = i.get('ad', {})
+ if not add_deps_info_from_input:
+ add_deps_info_from_input = i.get('add_deps', {})
+ else:
+ utils.merge_dicts({'dict1': add_deps_info_from_input, 'dict2': i.get(
+ 'add_deps', {}), 'append_lists': True, 'append_unique': True})
+
+ add_deps_recursive_info_from_input = i.get('adr', {})
+ if not add_deps_recursive_info_from_input:
+ add_deps_recursive_info_from_input = i.get('add_deps_recursive', {})
+ else:
+ utils.merge_dicts({'dict1': add_deps_recursive_info_from_input, 'dict2': i.get(
+ 'add_deps_recursive', {}), 'append_lists': True, 'append_unique': True})
+
+ env = i.get('env', {})
+
+ if add_deps_info_from_input:
+ r1 = update_deps(deps, add_deps_info_from_input, True, env)
+ r2 = update_deps(post_deps, add_deps_info_from_input, True, env)
+ r3 = update_deps(prehook_deps, add_deps_info_from_input, True, env)
+ r4 = update_deps(posthook_deps, add_deps_info_from_input, True, env)
+ if r1['return'] > 0 and r2['return'] > 0 and r3['return'] > 0 and r4['return'] > 0:
+ return r1
+ if add_deps_recursive_info_from_input:
+ update_deps(deps, add_deps_recursive_info_from_input, False, env)
+ update_deps(post_deps, add_deps_recursive_info_from_input, False, env)
+ update_deps(
+ prehook_deps,
+ add_deps_recursive_info_from_input,
+ False,
+ env)
+ update_deps(
+ posthook_deps,
+ add_deps_recursive_info_from_input,
+ False,
+ env)
+
+ return {'return': 0}
+
+
+##############################################################################
+def update_env_from_input_mapping(env, inp, input_mapping):
+ """
+ Internal: update env from input and input_mapping
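+
+ Illustrative example (hypothetical keys): with
+ input_mapping = {'model': 'CM_MODEL'} and inp = {'model': 'bert-99'},
+ env['CM_MODEL'] is set to 'bert-99'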
+ """
+ for key in input_mapping:
+ if key in inp:
+ env[input_mapping[key]] = inp[key]
+
+##############################################################################
+
+
+def update_state_from_meta(meta, env, state, const, const_state, deps, post_deps,
+ prehook_deps, posthook_deps, new_env_keys, new_state_keys, run_state, i):
+ """
+ Internal: update env and state from meta
+ """
+
+ default_env = meta.get('default_env', {})
+ for key in default_env:
+ env.setdefault(key, default_env[key])
+
+ update_env = meta.get('env', {})
+ env.update(update_env)
+
+ update_meta_if_env = meta.get('update_meta_if_env', [])
+ update_meta_if_env_from_state = run_state.get('update_meta_if_env', [])
+ run_state['update_meta_if_env'] = update_meta_if_env + \
+ update_meta_if_env_from_state
+
+ for c_meta in run_state['update_meta_if_env']:
+ if is_dep_tobe_skipped(c_meta, env):
+ continue
+ utils.merge_dicts({'dict1': env, 'dict2': c_meta.get(
+ 'env', {}), 'append_lists': True, 'append_unique': True})
+ utils.merge_dicts({'dict1': state, 'dict2': c_meta.get(
+ 'state', {}), 'append_lists': True, 'append_unique': True})
+ if c_meta.get('docker', {}):
+ if not state.get('docker', {}):
+ state['docker'] = {}
+ utils.merge_dicts({'dict1': state['docker'],
+ 'dict2': c_meta['docker'],
+ 'append_lists': True,
+ 'append_unique': True})
+
+ update_const = meta.get('const', {})
+ if update_const:
+ const.update(update_const)
+ env.update(const)
+
+ update_state = meta.get('state', {})
+ utils.merge_dicts({'dict1': state, 'dict2': update_state,
+ 'append_lists': True, 'append_unique': True})
+
+ update_const_state = meta.get('const_state', {})
+ if const_state:
+ utils.merge_dicts({'dict1': const_state,
+ 'dict2': update_const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+ utils.merge_dicts({'dict1': state,
+ 'dict2': const_state,
+ 'append_lists': True,
+ 'append_unique': True})
+
+ new_deps = meta.get('deps', [])
+ if len(new_deps) > 0:
+ append_deps(deps, new_deps)
+
+ new_post_deps = meta.get("post_deps", [])
+ if len(new_post_deps) > 0:
+ append_deps(post_deps, new_post_deps)
+
+ new_prehook_deps = meta.get("prehook_deps", [])
+ if len(new_prehook_deps) > 0:
+ append_deps(prehook_deps, new_prehook_deps)
+
+ new_posthook_deps = meta.get("posthook_deps", [])
+ if len(new_posthook_deps) > 0:
+ append_deps(posthook_deps, new_posthook_deps)
+
+ add_deps_info = meta.get('ad', {})
+ if not add_deps_info:
+ add_deps_info = meta.get('add_deps', {})
+ else:
+ utils.merge_dicts({'dict1': add_deps_info, 'dict2': meta.get(
+ 'add_deps', {}), 'append_lists': True, 'append_unique': True})
+ if add_deps_info:
+ r1 = update_deps(deps, add_deps_info, True, env)
+ r2 = update_deps(post_deps, add_deps_info, True, env)
+ r3 = update_deps(prehook_deps, add_deps_info, True, env)
+ r4 = update_deps(posthook_deps, add_deps_info, True, env)
+ if r1['return'] > 0 and r2['return'] > 0 and r3['return'] > 0 and r4['return'] > 0:
+ return r1
+
+ # i will have an 'input' key when called through cm.access
+ input_update_env = i.get('input', i)
+
+ input_mapping = meta.get('input_mapping', {})
+ if input_mapping:
+ update_env_from_input_mapping(env, input_update_env, input_mapping)
+
+ # handle dynamic env values
+ r = update_env_with_values(env)
+ if r['return'] > 0:
+ return r
+
+ # Possibly restrict this to within docker environment
+ # we need to see input here
+ add_deps_info = meta.get('ad', i.get('ad', {}))
+ if not add_deps_info:
+ add_deps_info = meta.get('add_deps', i.get('add_deps_recursive', {}))
+ else:
+ utils.merge_dicts({'dict1': add_deps_info, 'dict2': meta.get(
+ 'add_deps', {}), 'append_lists': True, 'append_unique': True})
+
+ new_docker_settings = meta.get('docker')
+ if new_docker_settings:
+ docker_settings = state.get('docker', {})
+ # docker_input_mapping = docker_settings.get('docker_input_mapping', {})
+ # new_docker_input_mapping = new_docker_settings.get('docker_input_mapping', {})
+ # if new_docker_input_mapping:
+ # # update_env_from_input_mapping(env, i['input'], docker_input_mapping)
+ # utils.merge_dicts({'dict1':docker_input_mapping, 'dict2':new_docker_input_mapping, 'append_lists':True, 'append_unique':True})
+ utils.merge_dicts({'dict1': docker_settings,
+ 'dict2': new_docker_settings,
+ 'append_lists': True,
+ 'append_unique': True})
+ if docker_settings.get('deps', []):
+ update_deps(docker_settings['deps'], add_deps_info, False, env)
+ state['docker'] = docker_settings
+
+ new_env_keys_from_meta = meta.get('new_env_keys', [])
+ if new_env_keys_from_meta:
+ new_env_keys += new_env_keys_from_meta
+
+ new_state_keys_from_meta = meta.get('new_state_keys', [])
+ if new_state_keys_from_meta:
+ new_state_keys += new_state_keys_from_meta
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def update_adr_from_meta(deps, post_deps, prehook_deps,
+ posthook_deps, add_deps_recursive_info, env={}):
+ """
+ Internal: update add_deps_recursive from meta
+ """
+ if add_deps_recursive_info:
+ update_deps(deps, add_deps_recursive_info, False, env)
+ update_deps(post_deps, add_deps_recursive_info, False, env)
+ update_deps(prehook_deps, add_deps_recursive_info, False, env)
+ update_deps(posthook_deps, add_deps_recursive_info, False, env)
+
+ return {'return': 0}
+
+##############################################################################
+
+
+def get_adr(meta):
+ add_deps_recursive_info = meta.get('adr', {})
+ if not add_deps_recursive_info:
+ add_deps_recursive_info = meta.get('add_deps_recursive', {})
+ else:
+ utils.merge_dicts({'dict1': add_deps_recursive_info, 'dict2': meta.get(
+ 'add_deps_recursive', {}), 'append_lists': True, 'append_unique': True})
+ return add_deps_recursive_info
+
+##############################################################################
+
+
+def detect_state_diff(env, saved_env, new_env_keys,
+ new_state_keys, state, saved_state):
+ """
+ Internal: detect diff in env and state
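+
+ Illustrative example (hypothetical keys): with
+ new_env_keys = ['CM_PYTHON_*'], every env key matching the pattern
+ (via fnmatch), e.g. CM_PYTHON_BIN, is copied into new_env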
+ """
+
+ new_env = {}
+ new_state = {}
+
+ # Check if leave only specific keys or detect diff automatically
+ for k in new_env_keys:
+ if '?' in k or '*' in k:
+ import fnmatch
+ for kk in env:
+ if fnmatch.fnmatch(kk, k):
+ new_env[kk] = env[kk]
+ elif k in env:
+ new_env[k] = env[k]
+ elif "<<<" in k:
+ import re
+ tmp_values = re.findall(r'<<<(.*?)>>>', k)
+ for tmp_value in tmp_values:
+ if tmp_value in env:
+ value = env[tmp_value]
+ if value in env:
+ new_env[value] = env[value]
+
+ for k in new_state_keys:
+ if '?' in k or '*' in k:
+ import fnmatch
+ for kk in state:
+ if fnmatch.fnmatch(kk, k):
+ new_state[kk] = state[kk]
+ elif k in state:
+ new_state[k] = state[k]
+ elif "<<<" in k:
+ import re
+ tmp_values = re.findall(r'<<<(.*?)>>>', k)
+ for tmp_value in tmp_values:
+ if tmp_value in state:
+ value = state[tmp_value]
+ if value in state:
+ new_state[value] = state[value]
+
+ return {'return': 0, 'env': env, 'new_env': new_env,
+ 'state': state, 'new_state': new_state}
+
+##############################################################################
+
+
+def select_script_artifact(lst, text, recursion_spaces,
+ can_skip, script_tags_string, quiet, verbose):
+ """
+ Internal: select script
+ """
+
+ string1 = recursion_spaces + \
+ ' - More than 1 {} found for "{}":'.format(text, script_tags_string)
+
+ # If quiet, select 0 (can be sorted for determinism)
+ if quiet:
+ logging.debug(string1)
+ logging.debug('Selected default due to "quiet" mode')
+
+ return 0
+
+ # Select 1 and proceed
+ logging.info(string1)
+ num = 0
+
+ for a in lst:
+ meta = a.meta
+
+ name = meta.get('name', '')
+
+ s = a.path
+ if name != '':
+ s = '"' + name + '" ' + s
+
+ x = recursion_spaces + \
+ ' {}) {} ({})'.format(num, s, ','.join(meta['tags']))
+
+ version = meta.get('version', '')
+ if version != '':
+ x += ' (Version {})'.format(version)
+
+ logging.info(x)
+ num += 1
+
+ s = 'Make your selection or press Enter for 0'
+ if can_skip:
+ s += ' or use -1 to skip'
+
+ x = input(recursion_spaces + ' ' + s + ': ')
+ x = x.strip()
+ if x == '':
+ x = '0'
+
+ selection = int(x)
+
+ if selection < 0 and not can_skip:
+ selection = 0
+
+ if selection < 0:
+ logging.info(recursion_spaces + ' Skipped')
+ else:
+ if selection >= num:
+ selection = 0
+ logging.info(
+ recursion_spaces +
+ ' Selected {}: {}'.format(
+ selection,
+ lst[selection].path))
+
+ return selection
+
+##############################################################################
+
+
+def check_versions(cmind, cached_script_version, version_min, version_max):
+ """
+ Internal: check versions of the cached script
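+
+ Illustrative example (assuming standard version comparison):
+ check_versions(cmind, '3.9.0', '3.10', '') -> True
+ (skip: cached version 3.9.0 < version_min 3.10)
+ check_versions(cmind, '3.11.2', '3.10', '3.12') -> False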
+ """
+ skip_cached_script = False
+
+ if cached_script_version != '':
+ if version_min != '':
+ ry = cmind.access({'action': 'compare_versions',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'version1': cached_script_version,
+ 'version2': version_min})
+ if ry['return'] > 0:
+ return ry
+
+ if ry['comparison'] < 0:
+ skip_cached_script = True
+
+ if not skip_cached_script and version_max != '':
+ ry = cmind.access({'action': 'compare_versions',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'version1': cached_script_version,
+ 'version2': version_max})
+ if ry['return'] > 0:
+ return ry
+
+ if ry['comparison'] > 0:
+ skip_cached_script = True
+
+ return skip_cached_script
+
+##############################################################################
+
+
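+# Illustrative example (hypothetical token):
+#   get_git_url('token', 'https://github.com/org/repo', {'token': 'XYZ'})
+#   -> 'https://git:XYZ@github.com/org/repo'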
+def get_git_url(get_type, url, params={}):
+ from giturlparse import parse
+ p = parse(url)
+ if get_type == "ssh":
+ return p.url2ssh
+ elif get_type == "token":
+ token = params['token']
+ return "https://git:" + token + "@" + p.host + "/" + p.owner + "/" + p.repo
+ return url
+
+##############################################################################
+
+
+def can_write_to_current_directory():
+
+ import tempfile
+
+ cur_dir = os.getcwd()
+
+# try:
+# tmp_file = tempfile.NamedTemporaryFile(dir = cur_dir)
+# except Exception as e:
+# return False
+
+ tmp_file_name = next(tempfile._get_candidate_names()) + '.tmp'
+
+ tmp_path = os.path.join(cur_dir, tmp_file_name)
+
+ try:
+ tmp_file = open(tmp_path, 'w')
+ except Exception:
+ return False
+
+ tmp_file.close()
+
+ os.remove(tmp_path)
+
+ return True
+
+##########################################################################
+
+
+def dump_repro_start(repro_prefix, ii):
+ import json
+
+ # Clean reproducibility and experiment files
+ for f in ['cm-output.json', 'version_info.json', '-input.json',
+ '-info.json', '-output.json', '-run-state.json']:
+ ff = repro_prefix + f if f.startswith('-') else f
+ if os.path.isfile(ff):
+ try:
+ os.remove(ff)
+ except BaseException:
+ pass
+
+ try:
+ with open(repro_prefix + '-input.json', 'w', encoding='utf-8') as f:
+ json.dump(ii, f, ensure_ascii=False, indent=2)
+ except BaseException:
+ pass
+
+ # Get some info
+ info = {}
+
+ try:
+ import platform
+ import sys
+
+ info['host_os_name'] = os.name
+ info['host_system'] = platform.system()
+ info['host_os_release'] = platform.release()
+ info['host_machine'] = platform.machine()
+ info['host_architecture'] = platform.architecture()
+ info['host_python_version'] = platform.python_version()
+ info['host_sys_version'] = sys.version
+
+ r = utils.gen_uid()
+ if r['return'] == 0:
+ info['run_uid'] = r['uid']
+
+ r = utils.get_current_date_time({})
+ if r['return'] == 0:
+ info['run_iso_datetime'] = r['iso_datetime']
+
+ with open(repro_prefix + '-info.json', 'w', encoding='utf-8') as f:
+ json.dump(info, f, ensure_ascii=False, indent=2)
+ except BaseException:
+ pass
+
+ # For experiment
+ cm_output = {}
+
+ cm_output['tmp_test_value'] = 10.0
+
+ cm_output['info'] = info
+ cm_output['input'] = ii
+
+ try:
+ with open('cm-output.json', 'w', encoding='utf-8') as f:
+ json.dump(cm_output, f, ensure_ascii=False, indent=2)
+ except BaseException:
+ pass
+
+ return {'return': 0}
+
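+# Illustrative note: with repro_prefix='cm-run-script', dump_repro_start
+# removes stale artifacts and then writes
+#   cm-run-script-input.json  (the raw CM input dict 'ii')
+#   cm-run-script-info.json   (host OS/Python info plus run UID and timestamp)
+#   cm-output.json            (experiment skeleton merged later by dump_repro)
+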
+##########################################################################
+
+
+def dump_repro(repro_prefix, rr, run_state):
+ import json
+ import copy
+
+ try:
+ with open(repro_prefix + '-output.json', 'w', encoding='utf-8') as f:
+ json.dump(rr, f, ensure_ascii=False, indent=2)
+ except BaseException:
+ pass
+
+ try:
+ with open(repro_prefix + '-run-state.json', 'w', encoding='utf-8') as f:
+ json.dump(run_state, f, ensure_ascii=False, indent=2)
+ except BaseException:
+ pass
+
+ # For experiment
+ cm_output = {}
+
+ # Attempt to read
+ try:
+ r = utils.load_json('cm-output.json')
+ if r['return'] == 0:
+ cm_output = r['meta']
+ except BaseException:
+ pass
+
+ cm_output['output'] = rr
+ cm_output['state'] = copy.deepcopy(run_state)
+
+ # Try to load version_info.json
+ version_info = {}
+
+    version_info_orig = []
+
+ if 'version_info' in cm_output['state']:
+ version_info_orig = cm_output['state']['version_info']
+ del (cm_output['state']['version_info'])
+
+ try:
+ r = utils.load_json('version_info.json')
+ if r['return'] == 0:
+ version_info_orig += r['meta']
+
+ for v in version_info_orig:
+ for key in v:
+ dep = v[key]
+ version_info[key] = dep
+
+ except BaseException:
+ pass
+
+ if len(version_info) > 0:
+ cm_output['version_info'] = version_info
+
+ if rr['return'] == 0:
+ # See https://cTuning.org/ae
+ cm_output['acm_ctuning_repro_badge_available'] = True
+ cm_output['acm_ctuning_repro_badge_functional'] = True
+
+ try:
+ with open('cm-output.json', 'w', encoding='utf-8') as f:
+ json.dump(
+ cm_output,
+ f,
+ ensure_ascii=False,
+ indent=2,
+ sort_keys=True)
+ except BaseException:
+ pass
+
+ return {'return': 0}
+
+
+##############################################################################
+# Demo to show how to use CM components independently if needed
+if __name__ == "__main__":
+ import cmind
+ auto = CAutomation(cmind, __file__)
+
+ r = auto.test({'x': 'y'})
+
+ logging.info(r)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/module_help.py b/cmx4mlops/cmx4mlops/repo/automation/script/module_help.py
new file mode 100644
index 0000000000..e1eb4424a9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/module_help.py
@@ -0,0 +1,119 @@
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+from cmind import utils
+
+# Print help for a script
+
+
+def print_help(i):
+
+ meta = i.get('meta', '')
+ path = i.get('path', '')
+
+ if len(meta) == 0 and path == '':
+ return {'return': 0}
+
+ print('')
+    print('Help for this CM script ({},{}):'.format(
+        meta.get('alias', ''), meta.get('uid', '')))
+
+ print('')
+ print('Path to this automation recipe: {}'.format(path))
+
+ variations = meta.get('variations', {})
+ if len(variations) > 0:
+ print('')
+ print('Available variations:')
+ print('')
+ for v in sorted(variations):
+ print(' _' + v)
+
+ input_mapping = meta.get('input_mapping', {})
+ if len(input_mapping) > 0:
+ print('')
+ print('Available flags mapped to environment variables:')
+ print('')
+ for k in sorted(input_mapping):
+ v = input_mapping[k]
+
+ print(' --{} -> --env.{}'.format(k, v))
+
+ input_description = meta.get('input_description', {})
+ if len(input_description) > 0:
+        # Check if some keys are marked important (positive 'sort' value) and list them first
+ sorted_keys = []
+ all_keys = sorted(list(input_description.keys()))
+
+ for k in sorted(
+ all_keys, key=lambda x: input_description[x].get('sort', 0)):
+ v = input_description[k]
+ if v.get('sort', 0) > 0:
+ sorted_keys.append(k)
+
+ print('')
+ print('Available flags (Python API dict keys):')
+ print('')
+ for k in all_keys:
+ v = input_description[k]
+ n = v.get('desc', '')
+
+ x = ' --' + k
+ if n != '':
+ x += ' ({})'.format(n)
+
+ print(x)
+
+ if len(sorted_keys) > 0:
+ print('')
+ print('Main flags:')
+ print('')
+ for k in sorted_keys:
+ v = input_description[k]
+ n = v.get('desc', '')
+
+ x = ' --' + k
+
+            d = None
+            if 'default' in v:
+                d = v.get('default', '')
+
+            if d is not None:
+                x += '=' + str(d)
+
+ c = v.get('choices', [])
+ if len(c) > 0:
+ x += ' {' + ','.join(c) + '}'
+
+ if n != '':
+ x += ' ({})'.format(n)
+
+ print(x)
+
+ print('')
+ x = input('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? ')
+
+ x = x.strip().lower()
+
+    skip_delayed_help = x not in ['y', 'yes']
+
+ r = {'return': 0}
+
+ if skip_delayed_help:
+ r['skip_delayed_help'] = True
+
+ return r
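+
+# Minimal usage sketch (illustrative; 'script_meta' is a hypothetical
+# script meta dictionary loaded elsewhere):
+#
+#   r = print_help({'meta': script_meta, 'path': '/path/to/script'})
+#   if r.get('skip_delayed_help'):
+#       pass  # user declined the extended Python API help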
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py b/cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py
new file mode 100644
index 0000000000..22b4cf2fdf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py
@@ -0,0 +1,2518 @@
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+from cmind import utils
+
+# Meta deps
+
+
+def process_deps(self_module, meta, meta_url, md_script_readme,
+ key, extra_space='', skip_from_meta=False, skip_if_empty=False):
+
+ x = ''
+ y = []
+ if len(meta.get(key, {})) > 0:
+ x = '***'
+
+ for d in meta[key]:
+ d_tags = d.get('tags', '')
+
+ z = extra_space + ' * ' + d_tags
+ y.append(z)
+
+ names = d.get('names', [])
+
+            for kk in [
+                ('enable_if_env', 'Enable this dependency only if all ENV vars are set'),
+                ('enable_if_any_env',
+                 'Enable this dependency only if any of ENV vars are set'),
+                ('skip_if_env',
+                 'Skip this dependency only if all ENV vars are set'),
+                ('skip_if_any_env',
+                 'Skip this dependency only if any of ENV vars are set')
+            ]:
+
+ k1 = kk[0]
+ k2 = kk[1]
+
+ conditions = d.get(k1, {})
+ if len(conditions) > 0:
+                    y.append(extra_space +
+                             '     * {}:<br>\n`{}`'.format(k2, str(conditions)))
+
+ if len(names) > 0:
+ y.append(
+ extra_space +
+ ' * CM names: `--adr.' +
+ str(names) +
+ '...`')
+
+ # Attempt to find related CM scripts
+ r = self_module.cmind.access({'action': 'find',
+ 'automation': 'script',
+ 'tags': d_tags})
+ if r['return'] == 0:
+ lst = r['list']
+
+ if len(lst) == 0:
+ y.append(extra_space +
+ ' - *Warning: no scripts found*')
+ else:
+ for s in lst:
+ s_repo_meta = s.repo_meta
+
+ s_repo_alias = s_repo_meta.get('alias', '')
+ s_repo_uid = s_repo_meta.get('uid', '')
+
+ # Check URL
+ s_url = ''
+ s_url_repo = ''
+ if s_repo_alias == 'internal':
+ s_url_repo = 'https://github.com/mlcommons/ck/tree/master/cm/cmind/repo'
+ s_url = s_url_repo + '/script/'
+ elif '@' in s_repo_alias:
+ s_url_repo = 'https://github.com/' + \
+ s_repo_alias.replace('@', '/') + '/tree/master'
+ if s_repo_meta.get('prefix', '') != '':
+ s_url_repo += '/' + s_repo_meta['prefix']
+ s_url = s_url_repo + '/script/'
+
+ s_alias = s.meta['alias']
+ y.append(
+ extra_space + ' - CM script: [{}]({})'.format(s_alias, s_url + s_alias))
+
+ z = ''
+ if not skip_from_meta:
+ z = ' from [meta]({})'.format(meta_url)
+
+ if not skip_if_empty or len(y) > 0:
+ md_script_readme.append(
+ (extra_space +
+ ' 1. ' +
+ x +
+ 'Read "{}" on other CM scripts' +
+ z +
+ x).format(key))
+ md_script_readme += y
+
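+# Illustrative output: for meta = {'deps': [{'tags': 'detect,os',
+# 'names': ['detect-os']}]} and key = 'deps', process_deps appends markdown
+# lines along these lines to md_script_readme (<meta_url> and <repo_url>
+# are placeholders here):
+#
+#      1. ***Read "deps" on other CM scripts from [meta](<meta_url>)***
+#      * detect,os
+#        * CM names: `--adr.['detect-os']...`
+#        - CM script: [detect-os](<repo_url>/script/detect-os)
+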
+############################################################
+
+
+def doc(i):
+ """
+    Generate documentation for CM scripts (automation recipes).
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ parsed_artifact (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ (repos) (str): list of repositories to search for automations
+
+ (output_dir) (str): output directory (../docs by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ self_module = i['self_module']
+
+ cur_dir = os.getcwd()
+
+ template_file = 'template_list_of_scripts.md'
+ list_file = 'list_of_scripts.md'
+
+ public_taskforce = '[Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)'
+
+ console = i.get('out') == 'con'
+
+ repos = i.get('repos', '')
+ if repos == '':
+ repos = 'internal,a4705959af8e447a'
+
+ parsed_artifact = i.get('parsed_artifact', [])
+
+ if len(parsed_artifact) < 1:
+ parsed_artifact = [('', ''), ('', '')]
+ elif len(parsed_artifact) < 2:
+ parsed_artifact.append(('', ''))
+ else:
+ repos = parsed_artifact[1][0]
+
+ list_of_repos = repos.split(',') if ',' in repos else [repos]
+
+ ii = utils.sub_input(i, self_module.cmind.cfg['artifact_keys'] + ['tags'])
+
+ ii['out'] = None
+
+ # Search for automations in repos
+ lst = []
+
+ for repo in list_of_repos:
+ parsed_artifact[1] = (
+ '', repo) if utils.is_cm_uid(repo) else (
+ repo, '')
+ ii['parsed_artifact'] = parsed_artifact
+ r = self_module.search(ii)
+ if r['return'] > 0:
+ return r
+ lst += r['list']
+
+ md = []
+
+ toc = []
+
+ toc_category = {}
+ toc_category_sort = {}
+ script_meta = {}
+ urls = {}
+
+ for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')):
+
+ toc_readme = []
+
+ # Common index for all scripts
+ md_script = []
+
+ path = artifact.path
+ meta = artifact.meta
+ original_meta = artifact.original_meta
+
+ print('Documenting {}'.format(path))
+
+ alias = meta.get('alias', '')
+ uid = meta.get('uid', '')
+
+ script_meta[alias] = meta
+
+ name = meta.get('name', '')
+ developers = meta.get('developers', '')
+
+        # Use 'tags_help' if present, otherwise fall back to all tags
+ tags = meta.get('tags_help', '').strip()
+ if tags == '':
+ tags = meta.get('tags', [])
+ else:
+ tags = tags.split(' ')
+
+ variations = meta.get('variations', {})
+
+ variation_keys = sorted(list(variations.keys()))
+ version_keys = sorted(list(meta.get('versions', {}).keys()))
+
+ default_variation = meta.get('default_variation', '')
+ default_version = meta.get('default_version', '')
+
+ input_mapping = meta.get('input_mapping', {})
+ input_description = meta.get('input_description', {})
+
+ category = meta.get('category', '').strip()
+ category_sort = meta.get('category_sort', 0)
+ if category != '':
+ if category not in toc_category:
+ toc_category[category] = []
+
+ if category not in toc_category_sort or category_sort > 0:
+ toc_category_sort[category] = category_sort
+
+ if alias not in toc_category[category]:
+ toc_category[category].append(alias)
+
+ repo_path = artifact.repo_path
+ repo_meta = artifact.repo_meta
+
+ repo_alias = repo_meta.get('alias', '')
+ repo_uid = repo_meta.get('uid', '')
+
+ # Check URL
+ url = ''
+ url_repo = ''
+ if repo_alias == 'internal':
+ url_repo = 'https://github.com/mlcommons/ck/tree/dev/cm/cmind/repo'
+ url = url_repo + '/script/'
+ elif '@' in repo_alias:
+ url_repo = 'https://github.com/' + \
+ repo_alias.replace('@', '/') + '/tree/dev'
+ if repo_meta.get('prefix', '') != '':
+ url_repo += '/' + repo_meta['prefix']
+ url = url_repo + '/script/'
+
+ if url != '':
+ url += alias
+
+ urls[alias] = url
+
+        # Check if there is an "about" doc
+ path_readme = os.path.join(path, 'README.md')
+ path_readme_extra = os.path.join(path, 'README-extra.md')
+ path_readme_about = os.path.join(path, 'README-about.md')
+
+ readme_about = ''
+ if os.path.isfile(path_readme_about):
+ r = utils.load_txt(path_readme_about, split=True)
+            if r['return'] > 0:
+                return r
+
+ s = r['string']
+ readme_about = r['list']
+
+ #######################################################################
+ # Start automatically generated README
+ md_script_readme = [
+        # '<details>',
+        # '<summary>Click here to see the table of contents.</summary>',
+        # '{{CM_README_TOC}}',
+        # '</details>',
+        # '',
+ 'Automatically generated README for this automation recipe: **{}**'.format(
+ meta['alias']),
+ ]
+
+ md_script.append('## ' + alias)
+ md_script.append('')
+
+# x = 'About'
+# md_script_readme.append('___')
+# md_script_readme.append('### '+x)
+# md_script_readme.append('')
+# toc_readme.append(x)
+
+# x = 'About'
+# md_script_readme.append('#### '+x)
+# md_script_readme.append('')
+# toc_readme.append(' '+x)
+
+ if name != '':
+ name += '.'
+ md_script.append('*' + name + '*')
+ md_script.append('')
+
+# md_script_readme.append('*'+name+'*')
+# md_script_readme.append('')
+
+ if os.path.isfile(path_readme):
+ r = utils.load_txt(path_readme, split=True)
+            if r['return'] > 0:
+                return r
+
+ s = r['string']
+ readme = r['list']
+
+            if 'automatically generated' not in s.lower():
+ found_path_readme_extra = True
+
+ # Attempt to rename to README-extra.md
+ if os.path.isfile(path_readme_extra):
+ return {
+ 'return': 1, 'error': 'README.md is not auto-generated and README-extra.md already exists - can\'t rename'}
+
+ os.rename(path_readme, path_readme_extra)
+
+ # Add to Git (if in git)
+ os.chdir(path)
+ os.system('git add README-extra.md')
+ os.chdir(cur_dir)
+
+ if category != '':
+ md_script_readme.append('')
+ md_script_readme.append('Category: **{}**'.format(category))
+
+ md_script_readme.append('')
+ md_script_readme.append('License: **Apache 2.0**')
+
+ md_script_readme.append('')
+
+ if developers == '':
+ md_script_readme.append('Maintainers: ' + public_taskforce)
+ else:
+ md_script_readme.append('Developers: ' + developers)
+
+ x = '* [{}]({})'.format(alias, url)
+ if name != '':
+ x += ' *(' + name + ')*'
+ toc.append(x)
+
+ cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format(
+ alias, uid)
+
+ if os.path.isfile(path_readme_extra):
+ readme_extra_url = url + '/README-extra.md'
+
+ x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format(
+ readme_extra_url)
+ md_script.append(x)
+
+ cm_readme_extra += '[ [Notes from the authors, contributors and users](README-extra.md) ] '
+
+ md_script_readme.append('')
+ md_script_readme.append('---')
+ md_script_readme.append('*' + cm_readme_extra.strip() + '*')
+
+ if readme_about != '':
+ md_script_readme += ['', '---', ''] + readme_about
+
+ x = 'Summary'
+ md_script_readme.append('')
+ md_script_readme.append('---')
+ md_script_readme += [
+            # '<details>',
+            # '<summary>Click to see the summary</summary>',
+ '#### Summary',
+ ''
+ ]
+ toc_readme.append(x)
+
+
+# if category != '':
+# x = 'Category'
+# md_script_readme.append('___')
+# md_script_readme.append('#### '+x)
+# md_script_readme.append(' ')
+# md_script_readme.append(category+'.')
+# toc_readme.append(x)
+
+# x = '* Category: *{}*'.format(category + '.')
+# md_script_readme.append(x)
+
+
+# x = 'Origin'
+# md_script_readme.append('___')
+# md_script_readme.append('#### '+x)
+# md_script_readme.append('')
+# toc_readme.append(x)
+
+ x = '* CM GitHub repository: *[{}]({})*'.format(repo_alias, url_repo)
+ md_script.append(x)
+ md_script_readme.append(x)
+
+ x = '* GitHub directory for this script: *[GitHub]({})*'.format(url)
+ md_script.append(x)
+ md_script_readme.append(x)
+
+ # Check meta
+ meta_file = self_module.cmind.cfg['file_cmeta']
+ meta_path = os.path.join(path, meta_file)
+
+ meta_file += '.yaml' if os.path.isfile(
+ meta_path + '.yaml') else '.json'
+
+ meta_url = url + '/' + meta_file
+
+ x = '* CM meta description of this script: *[GitHub]({})*'.format(
+ meta_url)
+ md_script.append(x)
+
+# x = '* CM automation "script": *[Docs]({})*'.format('https://github.com/octoml/ck/blob/master/docs/list_of_automations.md#script')
+# md_script.append(x)
+# md_script_readme.append(x)
+
+ if len(variation_keys) > 0:
+ variation_pointer = "[,variations]"
+ variation_pointer2 = "[variations]"
+ else:
+ variation_pointer = ''
+ variation_pointer2 = ''
+
+ if len(input_mapping) > 0:
+ input_mapping_pointer = "[--input_flags]"
+ else:
+ input_mapping_pointer = ''
+
+ cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags))
+ cli_all_tags3 = '`cm run script --tags={}{} {}`'.format(
+ ','.join(tags), variation_pointer, input_mapping_pointer)
+ x = '* CM CLI with all tags: {}*'.format(cli_all_tags)
+ md_script.append(x)
+
+ cli_help_tags_alternative = '`cmr "{}" --help`'.format(' '.join(tags))
+
+ cli_all_tags_alternative = '`cmr "{}"`'.format(' '.join(tags))
+ cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format(
+ ' '.join(tags), variation_pointer2, input_mapping_pointer)
+ cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format(
+ ' '.join(tags), variation_pointer, input_mapping_pointer)
+ x = '* CM CLI alternative: {}*'.format(cli_all_tags_alternative)
+ md_script.append(x)
+
+ cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format(
+ ' '.join(tags), variation_pointer2, input_mapping_pointer)
+
+
+# cli_uid = '`cm run script {} {}`'.format(meta['uid'], input_mapping_pointer)
+# x = '* CM CLI with alias and UID: {}*'.format(cli_uid)
+# md_script.append(x)
+
+ if len(variation_keys) > 0:
+ x = ''
+ for variation in variation_keys:
+ if x != '':
+ x += '; '
+ x += '_' + variation
+ md_script.append('* Variations: *{}*'.format(x))
+
+ if default_variation != '':
+ md_script.append(
+ '* Default variation: *{}*'.format(default_variation))
+
+ if len(version_keys) > 0:
+ md_script.append(
+ '* Versions: *{}*'.format('; '.join(version_keys)))
+
+ if default_version != '':
+ md_script.append('* Default version: *{}*'.format(default_version))
+
+ md_script.append('')
+# md_script_readme.append('')
+
+ # Add extra to README
+ x = 'Meta description'
+# md_script_readme.append('___')
+# md_script_readme.append('### '+x)
+ md_script_readme.append(
+ '* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file))
+# md_script_readme.append('')
+# toc_readme.append(x)
+
+ x = 'Tags'
+# md_script_readme.append('___')
+# md_script_readme.append('### '+x)
+ md_script_readme.append(
+ '* All CM tags to find and reuse this script (see in above meta description): *{}*'.format(','.join(tags)))
+# md_script_readme.append('')
+# toc_readme.append(x)
+
+ cache = meta.get('cache', False)
+ md_script_readme.append('* Output cached? *{}*'.format(str(cache)))
+
+ md_script_readme.append(
+ '* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts'))
+
+ md_script_readme += ['',
+ # ' '
+ ]
+
+ # Add usage
+ x1 = 'Reuse this script in your project'
+ x1a = 'Install MLCommons CM automation meta-framework'
+ x1aa = 'Pull CM repository with this automation recipe (CM script)'
+ x1b = 'Print CM help from the command line'
+ x2 = 'Customize and run this script from the command line with different variations and flags'
+ x3 = 'Run this script from Python'
+ x3a = 'Run this script via GUI'
+ x4 = 'Run this script via Docker (beta)'
+ md_script_readme += [
+ '',
+ '---',
+ '### ' + x1,
+ '',
+ '#### ' + x1a,
+ '',
+ '* [Install CM](https://access.cknowledge.org/playground/?action=install)',
+ '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)',
+ '',
+ '#### ' + x1aa,
+ '',
+ '```cm pull repo {}```'.format(repo_alias),
+ '',
+ '#### ' + x1b,
+ '',
+ '```{}```'.format(cli_help_tags_alternative),
+ '',
+ '#### ' + x2,
+ '',
+ '{}'.format(cli_all_tags),
+ '',
+ '{}'.format(cli_all_tags3),
+ '',
+ '*or*',
+ '',
+ '{}'.format(cli_all_tags_alternative),
+ '',
+ '{}'.format(cli_all_tags_alternative3),
+ '',
+ # '3. {}'.format(cli_uid),
+ '']
+
+        x = ' and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.'
+ if len(variation_keys) > 0:
+ md_script_readme += ['* *See the list of `variations` [here](#variations)' + x + '*',
+ ''
+ ]
+
+ if input_description and len(input_description) > 0:
+ x = 'Input Flags'
+ md_script_readme.append('')
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(' ' + x)
+
+ md_script_readme.append('')
+ key0 = ''
+ for key in input_description:
+ if key0 == '':
+ key0 = key
+
+ value = input_description[key]
+ desc = value
+
+ if isinstance(value, dict):
+ desc = value['desc']
+
+ choices = value.get('choices', [])
+ if len(choices) > 0:
+ desc += ' {' + ','.join(choices) + '}'
+
+ default = value.get('default', '')
+ if default != '':
+ desc += ' (*' + str(default) + '*)'
+
+ md_script_readme.append('* --**{}**={}'.format(key, desc))
+
+ md_script_readme.append('')
+            md_script_readme.append(
+                '**The above CLI flags can be used in the Python CM API as follows:**')
+ md_script_readme.append('')
+
+            x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
+ md_script_readme.append(x)
+
+        md_script_readme += ['#### ' + x3,
+                             '',
+                             '<details>',
+                             '<summary>Click here to expand this section.</summary>',
+                             '',
+ '',
+ '```python',
+ '',
+ 'import cmind',
+ '',
+                             "r = cmind.access({'action':'run',",
+ " 'automation':'script',",
+                             "          'tags':'{}',".format(
+                                 ','.join(tags)),
+ " 'out':'con',",
+ " ...",
+ " (other input keys for this script)",
+ " ...",
+ " })",
+ "",
+ "if r['return']>0:",
+ " print (r['error'])",
+ '',
+ '```',
+ '',
+                             '</details>',
+ '',
+
+ '',
+ '#### ' + x3a,
+ '',
+ '```cmr "cm gui" --script="' +
+ ','.join(tags) + '"```',
+ '',
+ # 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)),
+ # '',
+ '#### ' + x4,
+ '',
+ '{}'.format(cli_all_tags_alternative_docker),
+ ''
+ ]
+ toc_readme.append(x1)
+ toc_readme.append(' ' + x1a)
+ toc_readme.append(' ' + x1b)
+ toc_readme.append(' ' + x2)
+ toc_readme.append(' ' + x3)
+ toc_readme.append(' ' + x3a)
+ toc_readme.append(' ' + x4)
+
+ x = 'Customization'
+ md_script_readme.append('___')
+ md_script_readme.append('### ' + x)
+ md_script_readme.append('')
+ toc_readme.append(x)
+
+ if len(variation_keys) > 0:
+ # x = 'Variation groups'
+ # md_script_readme.append('___')
+ # md_script_readme.append('### '+x)
+ # toc_readme.append(x)
+
+ variation_groups = {}
+ default_variations = []
+ variation_md = {}
+ variation_alias = {}
+
+            # Normally should not be used anymore; use "default":true inside
+            # individual variations instead.
+ default_variation = meta.get('default_variation', '')
+
+ for variation_key in sorted(variation_keys):
+ variation = variations[variation_key]
+
+ alias = variation.get('alias', '').strip()
+
+ if alias != '':
+ aliases = variation_alias.get(alias, [])
+ if variation_key not in aliases:
+ aliases.append(variation_key)
+ variation_alias[alias] = aliases
+
+                    # This variation is an alias - skip the rest of this iteration
+ continue
+
+ default = variation.get('default', False)
+
+ if not default:
+ # Check outdated
+ if default_variation == variation_key:
+ default = True
+
+ extra1 = ''
+ extra2 = ''
+ if default:
+ extra1 = '**'
+ extra2 = '** (default)'
+
+ default_variations.append(variation_key)
+
+ md_var = []
+
+ md_var.append(
+ '* {}`_{}`{}'.format(extra1, variation_key, extra2))
+
+ variation_md[variation_key] = md_var
+
+# md_script_readme+=md_var
+
+ group = variation.get('group', '')
+
+ if variation_key.endswith('_'):
+ group = '*Internal group (variations should not be selected manually)*'
+ elif group == '':
+ group = '*No group (any variation can be selected)*'
+
+ if group not in variation_groups:
+ variation_groups[group] = []
+
+ variation_groups[group].append(variation_key)
+
+ x = 'Variations'
+ md_script_readme.append('')
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(' ' + x)
+
+ variation_groups_order = meta.get('variation_groups_order', [])
+ for variation in sorted(variation_groups):
+ if variation not in variation_groups_order:
+ variation_groups_order.append(variation)
+
+ for group_key in variation_groups_order:
+ md_script_readme.append('')
+
+ if not group_key.startswith('*'):
+ md_script_readme.append(
+ ' * Group "**{}**"'.format(group_key))
+ else:
+ md_script_readme.append(' * {}'.format(group_key))
+
+                md_script_readme += [
+                    '    <details>',
+                    '    <summary>Click here to expand this section.</summary>',
+                    ''
+                ]
+
+ for variation_key in sorted(variation_groups[group_key]):
+ variation = variations[variation_key]
+
+ xmd = variation_md[variation_key]
+
+ aliases = variation_alias.get(variation_key, [])
+ aliases2 = ['_' + v for v in aliases]
+
+ if len(aliases) > 0:
+ xmd.append(
+ ' - Aliases: `{}`'.format(','.join(aliases2)))
+
+ if len(variation.get('env', {})) > 0:
+ xmd.append(' - Environment variables:')
+ for key in variation['env']:
+ xmd.append(
+ ' - *{}*: `{}`'.format(key, variation['env'][key]))
+
+ xmd.append(' - Workflow:')
+
+ for dep in ['deps', 'prehook_deps',
+ 'posthook_deps', 'post_deps']:
+ process_deps(
+ self_module,
+ variation,
+ meta_url,
+ xmd,
+ dep,
+ ' ',
+ True,
+ True)
+
+ for x in xmd:
+ md_script_readme.append(' ' + x)
+
+ md_script_readme.append('')
+                md_script_readme.append('    </details>')
+ md_script_readme.append('')
+
+ # Check if has invalid_variation_combinations
+ vvc = meta.get('invalid_variation_combinations', [])
+ if len(vvc) > 0:
+ x = 'Unsupported or invalid variation combinations'
+ md_script_readme.append('')
+ md_script_readme.append('#### ' + x)
+ md_script_readme.append('')
+ md_script_readme.append('')
+ md_script_readme.append('')
+ toc_readme.append(' ' + x)
+
+ for v in vvc:
+ vv = ['_' + x for x in v]
+ md_script_readme.append('* `' + ','.join(vv) + '`')
+
+ if len(default_variations) > 0:
+ md_script_readme.append('')
+ md_script_readme.append('#### Default variations')
+ md_script_readme.append('')
+
+ dv = ['_' + x for x in sorted(default_variations)]
+
+ md_script_readme.append('`{}`'.format(','.join(dv)))
+
+ # Check if has valid_variation_combinations
+ vvc = meta.get('valid_variation_combinations', [])
+ if len(vvc) > 0:
+ x = 'Valid variation combinations checked by the community'
+ md_script_readme.append('')
+ md_script_readme.append('#### ' + x)
+ md_script_readme.append('')
+ md_script_readme.append('')
+ md_script_readme.append('')
+ toc_readme.append(' ' + x)
+
+ for v in vvc:
+ vv = ['_' + x for x in v]
+ md_script_readme.append('* `' + ','.join(vv) + '`')
+
+ # Check input flags
+ if input_mapping and len(input_mapping) > 0:
+ x = 'Script flags mapped to environment'
+ md_script_readme.append('')
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(' ' + x)
+
+            md_script_readme.append('<details>')
+            md_script_readme.append(
+                '<summary>Click here to expand this section.</summary>')
+
+ md_script_readme.append('')
+ key0 = ''
+ for key in sorted(input_mapping):
+ if key0 == '':
+ key0 = key
+ value = input_mapping[key]
+ md_script_readme.append(
+ '* `--{}=value` → `{}=value`'.format(key, value))
+
+ md_script_readme.append('')
+            md_script_readme.append(
+                '**The above CLI flags can be used in the Python CM API as follows:**')
+ md_script_readme.append('')
+
+            x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
+ md_script_readme.append(x)
+
+ md_script_readme.append('')
+            md_script_readme.append('</details>')
+ md_script_readme.append('')
+
+ # Default environment
+ default_env = meta.get('default_env', {})
+
+ x = 'Default environment'
+# md_script_readme.append('___')
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(' ' + x)
+
+ md_script_readme.append('')
+        md_script_readme.append('<details>')
+        md_script_readme.append(
+            '<summary>Click here to expand this section.</summary>')
+ md_script_readme.append('')
+ md_script_readme.append(
+ 'These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.')
+ md_script_readme.append('')
+
+ for key in default_env:
+ value = default_env[key]
+ md_script_readme.append('* {}: `{}`'.format(key, value))
+
+ md_script_readme.append('')
+        md_script_readme.append('</details>')
+ md_script_readme.append('')
+
+ if len(version_keys) > 0 or default_version != '':
+ x = 'Versions'
+# md_script_readme.append('___')
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(x)
+
+ if default_version != '':
+ md_script_readme.append(
+ 'Default version: `{}`'.format(default_version))
+ md_script_readme.append('')
+
+ if len(version_keys) > 0:
+ for version in version_keys:
+ md_script_readme.append('* `{}`'.format(version))
+
+ # Add workflow
+ x = 'Dependencies on other CM scripts'
+ md_script_readme += ['___',
+ '### ' + x,
+ '']
+ toc_readme.append(x)
+
+#        md_script_readme.append('<details>')
+#        md_script_readme.append('<summary>Click here to expand this section.</summary>')
+
+ md_script_readme.append('')
+
+ # Check customize.py file
+ path_customize = os.path.join(path, 'customize.py')
+ found_customize = False
+ found_customize_preprocess = False
+ found_customize_postprocess = False
+ found_output_env = []
+
+ if os.path.isfile(path_customize):
+ found_customize = True
+
+ r = utils.load_txt(path_customize, split=True)
+ if r['return'] > 0:
+ return r
+
+ customize = r['string']
+ customize_l = r['list']
+
+ if 'def preprocess(' in customize:
+ found_customize_preprocess = True
+
+ if 'def postprocess(' in customize:
+ found_customize_postprocess = True
+
+ # Ugly attempt to get output env
+ found_postprocess = False
+ for l in customize_l:
+ # if not found_postprocess:
+ # if 'def postprocess' in l:
+ # found_postprocess = True
+ # else:
+ j = l.find(' env[')
+ if j >= 0:
+ j1 = l.find(']', j + 4)
+ if j1 >= 0:
+ j2 = l.find('=', j1 + 1)
+ if j2 >= 0:
+ key2 = l[j + 5:j1].strip()
+ key = key2[1:-1]
+
+ if key.startswith(
+ 'CM_') and 'TMP' not in key and key not in found_output_env:
+ found_output_env.append(key)
+
+ process_deps(self_module, meta, meta_url, md_script_readme, 'deps')
+
+ x = ''
+ y = 'customize.py'
+ if found_customize_preprocess:
+ x = '***'
+ y = '[' + y + '](' + url + '/' + y + ')'
+ md_script_readme.append(
+ (' 1. ' + x + 'Run "preprocess" function from {}' + x).format(y))
+
+ process_deps(
+ self_module,
+ meta,
+ meta_url,
+ md_script_readme,
+ 'prehook_deps')
+
+ # Check scripts
+ files = os.listdir(path)
+ x = ''
+ y = []
+ for f in sorted(files):
+ x = '***'
+ if f.startswith('run') and (
+ f.endswith('.sh') or f.endswith('.bat')):
+ f_url = url + '/' + f
+ y.append(' * [{}]({})'.format(f, f_url))
+
+        md_script_readme.append(
+            ' 1. ' + x + 'Run native script if exists' + x)
+ md_script_readme += y
+
+ process_deps(
+ self_module,
+ meta,
+ meta_url,
+ md_script_readme,
+ 'posthook_deps')
+
+ x = ''
+ y = 'customize.py'
+ if found_customize_postprocess:
+ x = '***'
+ y = '[' + y + '](' + url + '/' + y + ')'
+        md_script_readme.append(
+            (' 1. ' + x + 'Run "postprocess" function from {}' + x).format(y))
+
+ process_deps(
+ self_module,
+ meta,
+ meta_url,
+ md_script_readme,
+ 'post_deps')
+        # md_script_readme.append('</details>')
+ md_script_readme.append('')
+
+ # New environment
+ new_env_keys = meta.get('new_env_keys', [])
+
+ x = 'Script output'
+ md_script_readme.append('___')
+ md_script_readme.append('### ' + x)
+ toc_readme.append(x)
+
+ md_script_readme.append(cli_all_tags_alternative_j)
+
+ x = 'New environment keys (filter)'
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(x)
+
+ md_script_readme.append('')
+ for key in sorted(new_env_keys):
+ md_script_readme.append('* `{}`'.format(key))
+
+ # Pass found_output_env through above filter
+ found_output_env_filtered = []
+
+ import fnmatch
+
+ for key in found_output_env:
+ add = False
+
+ for f in new_env_keys:
+ if fnmatch.fnmatch(key, f):
+ add = True
+ break
+
+ if add:
+ found_output_env_filtered.append(key)
+
+ x = 'New environment keys auto-detected from customize'
+ md_script_readme.append('#### ' + x)
+ toc_readme.append(x)
+
+ md_script_readme.append('')
+ for key in sorted(found_output_env_filtered):
+ md_script_readme.append('* `{}`'.format(key))
+
+ # Add maintainers
+# x = 'Maintainers'
+# md_script_readme.append('___')
+# md_script_readme.append('### '+x)
+# md_script_readme.append('')
+# md_script_readme.append('* ' + public_taskforce)
+# toc_readme.append(x)
+
+ # Process TOC
+ toc_readme_string = '\n'
+ for x in toc_readme:
+ x2 = x
+ prefix = ''
+
+ if x.startswith(' '):
+ prefix = ' '
+ x2 = x[1:]
+
+ x2 = x2.lower().replace(' ', '-').replace(',', '')
+ toc_readme_string += prefix + '* [{}](#{})\n'.format(x, x2)
+
+ # Add to the total list
+ md += md_script
+
+ s = '\n'.join(md_script_readme)
+
+ s = s.replace('{{CM_README_EXTRA}}', cm_readme_extra)
+# s = s.replace('{{CM_SEE_README_EXTRA}}', cm_see_readme_extra)
+ s = s.replace('{{CM_README_TOC}}', toc_readme_string)
+
+ r = utils.save_txt(path_readme, s)
+ if r['return'] > 0:
+ return r
+
+ # Add to Git (if in git)
+ os.chdir(path)
+ os.system('git add README.md')
+ os.chdir(cur_dir)
+
+ # Recreate TOC with categories
+ toc2 = []
+
+ # , key = lambda x: -toc_category_sort[x]):
+ for category in sorted(toc_category):
+ toc2.append('### ' + category)
+ toc2.append('')
+
+ for script in sorted(toc_category[category]):
+
+ meta = script_meta[script]
+
+ name = meta.get('name', '')
+
+ url = urls[script]
+
+ x = '* [{}]({})'.format(script, url)
+ if name != '':
+ x += ' *(' + name + ')*'
+
+ toc2.append(x)
+
+ toc2.append('')
+
+ toc_category_string = ''
+ for category in sorted(toc_category):
+ category_link = category.lower().replace(' ', '-').replace('/', '')
+ toc_category_string += '* [{}](#{})\n'.format(category, category_link)
+
+ # Load template
+ r = utils.load_txt(os.path.join(self_module.path, template_file))
+ if r['return'] > 0:
+ return r
+
+ s = r['string']
+
+ s = s.replace('{{CM_TOC2}}', '\n'.join(toc2))
+ s = s.replace('{{CM_TOC}}', '\n'.join(toc))
+# s = s.replace('{{CM_MAIN}}', '\n'.join(md))
+ s = s.replace('{{CM_MAIN}}', '')
+ s = s.replace('{{CM_TOC_CATEGORIES}}', toc_category_string)
+
+ # Output
+ output_dir = i.get('output_dir', '')
+
+ if output_dir == '':
+ output_dir = '..'
+
+ output_file = os.path.join(output_dir, list_file)
+
+ r = utils.save_txt(output_file, s)
+ if r['return'] > 0:
+ return r
+
+ out_docs_file = os.path.join(
+ "..",
+ "docs",
+ "scripts",
+ category,
+ alias,
+ "index.md")
+ r = utils.save_txt(out_docs_file, s)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This function takes in a host path and returns the absolute path on host and the container
+# If mounts is passed, the function appends the host path and the
+# container path to mounts in the form "host_path:container_path"
+def update_path_for_docker(path, mounts=None, force_path_target=''):
+
+ path_orig = ''
+ path_target = ''
+
+ if path != '': # and (os.path.isfile(path) or os.path.isdir(path)):
+ path = os.path.abspath(path)
+
+ path_target = path
+ path_orig = path
+
+ if os.name == 'nt':
+ from pathlib import PureWindowsPath, PurePosixPath
+
+ x = PureWindowsPath(path_orig)
+ path_target = str(PurePosixPath('/', *x.parts[1:]))
+
+ if not path_target.startswith('/'):
+ path_target = '/' + path_target
+
+ path_target = '/cm-mount' + \
+ path_target if force_path_target == '' else force_path_target
+
+ # If file, mount directory
+ if os.path.isfile(path) or not os.path.isdir(path):
+ x = os.path.dirname(path_orig) + ':' + os.path.dirname(path_target)
+ else:
+ x = path_orig + ':' + path_target
+
+    # Check that this mount is not duplicated
+ if mounts is not None:
+ to_add = True
+ for y in mounts:
+ if y.lower() == x.lower():
+ to_add = False
+ break
+ if to_add:
+ mounts.append(x)
+
+ return (path_orig, path_target)
+
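+# Example (illustrative, Linux host, path exists as a file):
+#
+#   mounts = []
+#   update_path_for_docker('/home/user/data/file.txt', mounts)
+#   # -> ('/home/user/data/file.txt', '/cm-mount/home/user/data/file.txt')
+#   # mounts == ['/home/user/data:/cm-mount/home/user/data']
+#   # (the parent directory is mounted because the path points to a file)
+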
+############################################################
+
+
+def process_inputs(i):
+
+ import copy
+
+ i_run_cmd_arc = i['run_cmd_arc']
+ docker_settings = i['docker_settings']
+ mounts = i['mounts']
+
+ # Check if need to update/map/mount inputs and env
+ i_run_cmd = copy.deepcopy(i_run_cmd_arc)
+
+ def get_value_using_key_with_dots(d, k):
+ v = None
+ j = k.find('.')
+ if j >= 0:
+ k1 = k[:j]
+ k2 = k[j + 1:]
+
+ if k1 in d:
+ v = d[k1]
+
+ if '.' in k2:
+ v, d, k = get_value_using_key_with_dots(v, k2)
+ else:
+ d = v
+ k = k2
+ if isinstance(v, dict):
+ v = v.get(k2)
+ else:
+ v = None
+ else:
+ if k == '':
+ v = d
+ else:
+ v = d.get(k)
+
+ return v, d, k
+
+ docker_input_paths = docker_settings.get('input_paths', [])
+ if len(i_run_cmd) > 0:
+ for k in docker_input_paths:
+ v2, i_run_cmd2, k2 = get_value_using_key_with_dots(i_run_cmd, k)
+
+ if v2 is not None:
+ v = i_run_cmd2[k2]
+
+ path_orig, path_target = update_path_for_docker(v, mounts)
+
+ if path_target != '':
+ i_run_cmd2[k2] = path_target
+
+ return {'return': 0, 'run_cmd': i_run_cmd}
+
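+# Illustrative: if docker_settings['input_paths'] == ['env.CM_DATASET_PATH']
+# and run_cmd_arc == {'env': {'CM_DATASET_PATH': '/home/user/dataset'}},
+# the returned run_cmd has the env value rewritten to its /cm-mount/...
+# counterpart and the host:container pair appended to 'mounts'.
+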
+
+############################################################
+def regenerate_script_cmd(i):
+
+ script_uid = i['script_uid']
+ script_alias = i['script_alias']
+ tags = i['tags']
+ docker_settings = i['docker_settings']
+ fake_run = i.get('fake_run', False)
+
+ i_run_cmd = i['run_cmd']
+
+ # Cleanup from env everything that has a host path value
+ if i_run_cmd.get('env'):
+ for key in list(i_run_cmd.get('env')):
+ if isinstance(i_run_cmd['env'][key], str) and ((os.path.join("local", "cache", "") in i_run_cmd['env'][key]) or (
+ os.path.join("CM", "repos", "") in i_run_cmd['env'][key])):
+ del (i_run_cmd['env'][key])
+ elif isinstance(i_run_cmd['env'][key], list):
+ values_to_remove = []
+ for val in i_run_cmd['env'][key]:
+ if isinstance(val, str) and ((os.path.join("local", "cache", "") in val) or (
+ os.path.join("CM", "repos", "") in val)):
+ values_to_remove.append(val)
+ if values_to_remove == i_run_cmd['env'][key]:
+ del (i_run_cmd['env'][key])
+ else:
+ for val in values_to_remove:
+ i_run_cmd['env'][key].remove(val)
+
+ docker_run_cmd_prefix = i['docker_run_cmd_prefix']
+
+ # Regenerate command from dictionary input
+ run_cmd = 'cm run script'
+
+ x = ''
+
+ # Check if there are some tags without variation
+ requested_tags = i_run_cmd.get('tags', [])
+
+ tags_without_variation = False
+ for t in requested_tags:
+ if not t.startswith('_'):
+ tags_without_variation = True
+ break
+
+ if not tags_without_variation:
+ # If no tags without variation, add script alias and UID explicitly
+ if script_uid != '':
+ x = script_uid
+ if script_alias != '':
+ if x != '':
+ x = ',' + x
+ x = script_alias + x
+
+ if x != '':
+ run_cmd += ' ' + x + ' '
+
+ skip_input_for_fake_run = docker_settings.get(
+ 'skip_input_for_fake_run', [])
+ add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', [])
+
+ def rebuild_flags(i_run_cmd, fake_run,
+ skip_input_for_fake_run, add_quotes_to_keys, key_prefix):
+
+ run_cmd = ''
+
+ keys = list(i_run_cmd.keys())
+
+ if 'tags' in keys:
+ # Move tags first
+ tags_position = keys.index('tags')
+ del (keys[tags_position])
+ keys = ['tags'] + keys
+
+ for k in keys:
+ # Assemble long key if dictionary
+ long_key = key_prefix
+ if long_key != '':
+ long_key += '.'
+ long_key += k
+
+ if fake_run and long_key in skip_input_for_fake_run:
+ continue
+
+ v = i_run_cmd[k]
+
+ q = '\\"' if long_key in add_quotes_to_keys else ''
+
+ if isinstance(v, dict):
+ run_cmd += rebuild_flags(v,
+ fake_run,
+ skip_input_for_fake_run,
+ add_quotes_to_keys,
+ long_key)
+ elif isinstance(v, list):
+ x = ''
+ for vv in v:
+ if x != '':
+ x += ','
+ x += q + str(vv) + q
+ run_cmd += ' --' + long_key + ',=' + x
+ else:
+ run_cmd += ' --' + long_key + '=' + q + str(v) + q
+
+ return run_cmd
+
+ run_cmd += rebuild_flags(i_run_cmd,
+ fake_run,
+ skip_input_for_fake_run,
+ add_quotes_to_keys,
+ '')
+
+ run_cmd = docker_run_cmd_prefix + ' && ' + \
+ run_cmd if docker_run_cmd_prefix != '' else run_cmd
+
+ return {'return': 0, 'run_cmd_string': run_cmd}
+
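+# Illustrative: for i_run_cmd = {'tags': 'detect,os', 'env': {'CM_X': '1'},
+# 'quiet': True} and no docker_run_cmd_prefix, the regenerated command is
+# roughly:
+#
+#   cm run script --tags=detect,os --env.CM_X=1 --quiet=True
+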
+
+############################################################
+def aux_search(i):
+
+ self_module = i['self_module']
+
+ inp = i['input']
+
+ repos = inp.get('repos', '')
+# Commented out by Grigori Fursin on 20240412 because this line prevents
+# searching for scripts in other public or private repositories.
+# Not sure why we enforce just 2 repositories:
+#
+# if repos == '': repos='internal,a4705959af8e447a'
+
+ parsed_artifact = inp.get('parsed_artifact', [])
+
+ if len(parsed_artifact) < 1:
+ parsed_artifact = [('', ''), ('', '')]
+ elif len(parsed_artifact) < 2:
+ parsed_artifact.append(('', ''))
+ else:
+ repos = parsed_artifact[1][0]
+
+ list_of_repos = repos.split(',') if ',' in repos else [repos]
+
+ ii = utils.sub_input(
+ inp,
+ self_module.cmind.cfg['artifact_keys'] +
+ ['tags'])
+
+ ii['out'] = None
+
+ # Search for automations in repos
+ lst = []
+ for repo in list_of_repos:
+ parsed_artifact[1] = (
+ '', repo) if utils.is_cm_uid(repo) else (
+ repo, '')
+ ii['parsed_artifact'] = parsed_artifact
+ r = self_module.search(ii)
+ if r['return'] > 0:
+ return r
+ lst += r['list']
+
+ return {'return': 0, 'list': lst}
+
+
+############################################################
+def dockerfile(i):
+ """
+    Generate a Dockerfile to run a CM script via Docker.
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+ (repos) (str): list of repositories to search for automations
+ (output_dir) (str): output directory (./ by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ import copy
+
+ # Check simplified CMD: cm docker script "python app image-classification onnx"
+ # If artifact has spaces, treat them as tags!
+ self_module = i['self_module']
+ self_module.cmind.access(
+ {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i})
+
+ # Prepare "clean" input to replicate command
+ r = self_module.cmind.access({'action': 'prune_input',
+ 'automation': 'utils',
+ 'input': i,
+ 'extra_keys_starts_with': ['docker_']})
+ i_run_cmd_arc = r['new_input']
+
+ cur_dir = os.getcwd()
+
+ quiet = i.get('quiet', False)
+
+ console = i.get('out') == 'con'
+
+ # Search for script(s)
+ r = aux_search({'self_module': self_module, 'input': i})
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ if len(lst) == 0:
+ return {'return': 1, 'error': 'no scripts were found'}
+
+
+# if i.get('cmd'):
+# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') )
+# elif i.get('artifact'):
+# run_cmd = "cm run script "+i['artifact']
+# elif i.get('tags'):
+# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\""
+# else:
+# run_cmd = ""
+#
+# run_cmd = i.get('docker_run_cmd_prefix') + ' && ' + run_cmd if i.get('docker_run_cmd_prefix') else run_cmd
+
+ env = i.get('env', {})
+ state = i.get('state', {})
+ const = i.get('const', {})
+ const_state = i.get('const_state', {})
+ script_automation = i['self_module']
+
+ dockerfile_env = i.get('dockerfile_env', {})
+
+ tags_split = i.get('tags', '').split(",")
+ variation_tags = [t[1:] for t in tags_split if t.startswith("_")]
+
+ for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')):
+
+ meta = artifact.meta
+
+ script_path = artifact.path
+
+ tags = meta.get("tags", [])
+ tag_string = ",".join(tags)
+
+ script_alias = meta.get('alias', '')
+ script_uid = meta.get('uid', '')
+
+ verbose = i.get('v', False)
+ show_time = i.get('show_time', False)
+
+ run_state = {'deps': [], 'fake_deps': [], 'parent': None}
+ run_state['script_id'] = script_alias + "," + script_uid
+ run_state['script_variation_tags'] = variation_tags
+ variations = meta.get('variations', {})
+ docker_settings = meta.get('docker', {})
+ docker_settings['dockerfile_env'] = dockerfile_env
+ state['docker'] = docker_settings
+ add_deps_recursive = i.get('add_deps_recursive', {})
+
+ r = script_automation.update_state_from_meta(
+ meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps=[],
+ post_deps=[],
+ prehook_deps=[],
+ posthook_deps=[],
+ new_env_keys=[],
+ new_state_keys=[],
+ run_state=run_state,
+ i=i)
+ if r['return'] > 0:
+ return r
+
+ r = script_automation._update_state_from_variations(
+ i,
+ meta,
+ variation_tags,
+ variations,
+ env,
+ state,
+ const,
+ const_state,
+ deps=[],
+ post_deps=[],
+ prehook_deps=[],
+ posthook_deps=[],
+ new_env_keys_from_meta=[],
+ new_state_keys_from_meta=[],
+ add_deps_recursive=add_deps_recursive,
+ run_state=run_state,
+ recursion_spaces='',
+ verbose=False)
+ if r['return'] > 0:
+ return r
+
+ docker_settings = state['docker']
+ dockerfile_env = docker_settings['dockerfile_env']
+ dockerfile_env['CM_RUN_STATE_DOCKER'] = True
+
+ if not docker_settings.get('run', True) and not i.get(
+ 'docker_run_override', False):
+ print("docker.run set to False in _cm.json")
+ continue
+ '''run_config_path = os.path.join(script_path,'run_config.yml')
+ if not os.path.exists(run_config_path):
+ print("No run_config.yml file present in {}".format(script_path))
+ continue
+ import yaml
+ with open(run_config_path, 'r') as run_config_file:
+ run_config = yaml.safe_load(run_config_file)
+ docker_settings = run_config.get('docker')
+ if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'):
+ print("Run config is not configured for docker run in {}".format(run_config_path))
+ continue
+ '''
+
+ deps = docker_settings.get('build_deps', [])
+ if deps:
+ r = script_automation._run_deps(
+ deps,
+ [],
+ env,
+ {},
+ {},
+ {},
+ {},
+ '',
+ [],
+ '',
+ False,
+ '',
+ verbose,
+ show_time,
+ ' ',
+ run_state)
+ if r['return'] > 0:
+ return r
+ # For updating meta from update_meta_if_env
+ r = script_automation.update_state_from_meta(
+ meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps=[],
+ post_deps=[],
+ prehook_deps=[],
+ posthook_deps=[],
+ new_env_keys=[],
+ new_state_keys=[],
+ run_state=run_state,
+ i=i)
+ if r['return'] > 0:
+ return r
+ docker_settings = state['docker']
+
+ d_env = i_run_cmd_arc.get('env', {})
+ for key in list(d_env.keys()):
+ if key.startswith("CM_TMP_"):
+ del (d_env[key])
+
+ # Check if need to update/map/mount inputs and env
+ r = process_inputs({'run_cmd_arc': i_run_cmd_arc,
+ 'docker_settings': docker_settings,
+ 'mounts': []})
+ if r['return'] > 0:
+ return r
+
+ i_run_cmd = r['run_cmd']
+
+ docker_run_cmd_prefix = i.get(
+ 'docker_run_cmd_prefix', docker_settings.get(
+ 'run_cmd_prefix', ''))
+
+ r = regenerate_script_cmd({'script_uid': script_uid,
+ 'script_alias': script_alias,
+ 'run_cmd': i_run_cmd,
+ 'tags': tags,
+ 'fake_run': True,
+ 'docker_settings': docker_settings,
+ 'docker_run_cmd_prefix': docker_run_cmd_prefix})
+ if r['return'] > 0:
+ return r
+
+ run_cmd = r['run_cmd_string']
+
+ cm_repo = i.get(
+ 'docker_cm_repo',
+ docker_settings.get(
+ 'cm_repo',
+ 'mlcommons@cm4mlops'))
+ cm_repo_branch = i.get(
+ 'docker_cm_repo_branch',
+ docker_settings.get(
+ 'cm_repo_branch',
+ 'mlperf-inference'))
+
+ cm_repo_flags = i.get(
+ 'docker_cm_repo_flags',
+ docker_settings.get(
+ 'cm_repo_flags',
+ ''))
+
+ docker_base_image = i.get(
+ 'docker_base_image',
+ docker_settings.get('base_image'))
+ docker_os = i.get(
+ 'docker_os', docker_settings.get(
+ 'docker_os', 'ubuntu'))
+ docker_os_version = i.get(
+ 'docker_os_version', docker_settings.get(
+ 'docker_os_version', '22.04'))
+
+ docker_cm_repos = i.get(
+ 'docker_cm_repos',
+ docker_settings.get(
+ 'cm_repos',
+ ''))
+
+ docker_skip_cm_sys_upgrade = i.get(
+ 'docker_skip_cm_sys_upgrade', docker_settings.get(
+ 'skip_cm_sys_upgrade', ''))
+
+ docker_extra_sys_deps = i.get('docker_extra_sys_deps', '')
+
+ if not docker_base_image:
+ dockerfilename_suffix = docker_os + '_' + docker_os_version
+ else:
+ if os.name == 'nt':
+ dockerfilename_suffix = docker_base_image.replace(
+ '/', '-').replace(':', '-')
+ else:
+                dockerfilename_suffix = docker_base_image.split("/")[-1]
+
+ fake_run_deps = i.get(
+ 'fake_run_deps', docker_settings.get(
+ 'fake_run_deps', False))
+ docker_run_final_cmds = docker_settings.get(
+ 'docker_run_final_cmds', [])
+
+ r = check_gh_token(i, docker_settings, quiet)
+ if r['return'] > 0:
+ return r
+ gh_token = r['gh_token']
+ i['docker_gh_token'] = gh_token # To pass to docker function if needed
+
+ if i.get('docker_real_run', docker_settings.get(
+ 'docker_real_run', False)):
+ fake_run_option = " "
+ fake_run_deps = False
+ else:
+ fake_run_option = " --fake_run"
+
+ docker_copy_files = i.get(
+ 'docker_copy_files',
+ docker_settings.get(
+ 'copy_files',
+ []))
+
+ env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds
+
+ docker_path = i.get('docker_path', '').strip()
+ if docker_path == '':
+ docker_path = script_path
+
+ dockerfile_path = os.path.join(
+ docker_path,
+ 'dockerfiles',
+ dockerfilename_suffix +
+ '.Dockerfile')
+
+ if i.get('print_deps'):
+ cm_input = {'action': 'run',
+ 'automation': 'script',
+ 'tags': f"""{i.get('tags')}""",
+ 'print_deps': True,
+ 'quiet': True,
+ 'silent': True,
+ 'fake_run': True,
+ 'fake_deps': True
+ }
+ r = self_module.cmind.access(cm_input)
+ if r['return'] > 0:
+ return r
+ print_deps = r['new_state']['print_deps']
+ comments = ["#RUN " + dep for dep in print_deps]
+ comments.append("")
+ comments.append("# Run CM workflow")
+ else:
+ comments = []
+
+ if i.get('docker_push_image', '') in ['True', True, 'yes']:
+ env['CM_DOCKER_PUSH_IMAGE'] = 'yes'
+
+ cm_docker_input = {'action': 'run',
+ 'automation': 'script',
+ 'tags': 'build,dockerfile',
+ 'cm_repo': cm_repo,
+ 'cm_repo_branch': cm_repo_branch,
+ 'cm_repo_flags': cm_repo_flags,
+ 'docker_base_image': docker_base_image,
+ 'docker_os': docker_os,
+ 'docker_os_version': docker_os_version,
+ 'skip_cm_sys_upgrade': docker_skip_cm_sys_upgrade,
+ 'file_path': dockerfile_path,
+ 'fake_run_option': fake_run_option,
+ 'comments': comments,
+ 'run_cmd': f'{run_cmd} --quiet',
+ 'script_tags': f"""{i.get('tags')}""",
+ 'copy_files': docker_copy_files,
+ 'quiet': True,
+ 'env': env,
+ 'dockerfile_env': dockerfile_env,
+ 'v': i.get('v', False),
+ 'fake_docker_deps': fake_run_deps,
+ 'print_deps': True,
+ 'real_run': True
+ }
+
+ if docker_cm_repos != '':
+ cm_docker_input['cm_repos'] = docker_cm_repos
+
+ if gh_token != '':
+ cm_docker_input['gh_token'] = gh_token
+
+ if docker_extra_sys_deps != '':
+ cm_docker_input['extra_sys_deps'] = docker_extra_sys_deps
+
+ r = self_module.cmind.access(cm_docker_input)
+ if r['return'] > 0:
+ return r
+
+ print('')
+ print("Dockerfile generated at " + dockerfile_path)
+
+ return {'return': 0}
+
+# we mount the main folder of the CM cache entry in case any file/folder
+# in that cache entry is needed inside the container
+
+
+def get_host_path(value):
+ path_split = value.split(os.sep)
+ if len(path_split) == 1:
+ return value
+
+    if "cache" in path_split and "local" in path_split:
+ repo_entry_index = path_split.index("local")
+ if len(path_split) >= repo_entry_index + 3:
+ return os.sep.join(path_split[0:repo_entry_index + 3])
+
+ return value
+
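+# Example (illustrative): the function trims a CM cache path down to the
+# cache entry root so the whole entry can be mounted:
+#
+#   get_host_path('/home/user/CM/repos/local/cache/abc123/data/file.txt')
+#   # -> '/home/user/CM/repos/local/cache/abc123'
+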
+
+def get_container_path_script(i):
+ tmp_dep_cached_path = i['tmp_dep_cached_path']
+ value_mnt, value_env = get_container_path(tmp_dep_cached_path)
+ return {'return': 0, 'value_mnt': value_mnt, 'value_env': value_env}
+
+
+def get_container_path(value):
+ path_split = value.split(os.sep)
+    if len(path_split) == 1:
+        return value, value
+
+    if "cache" in path_split and "local" in path_split:
+ new_path_split = ["", "home", "cmuser", "CM", "repos"]
+ repo_entry_index = path_split.index("local")
+ if len(path_split) >= repo_entry_index + 3:
+ new_path_split1 = new_path_split + \
+ path_split[repo_entry_index:repo_entry_index + 3]
+ new_path_split2 = new_path_split + path_split[repo_entry_index:]
+ return "/".join(new_path_split1), "/".join(new_path_split2)
+ else:
+ orig_path, target_path = update_path_for_docker(path=value)
+ return target_path, target_path
+
+ # return value, value
+
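+# Example (illustrative): a host cache path is mapped into the fixed
+# /home/cmuser/CM/repos tree inside the container:
+#
+#   get_container_path('/home/user/CM/repos/local/cache/abc123/data')
+#   # -> ('/home/cmuser/CM/repos/local/cache/abc123',
+#   #     '/home/cmuser/CM/repos/local/cache/abc123/data')
+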
+
+############################################################
+def docker(i):
+ """
+ CM automation to run CM scripts via Docker
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ import copy
+ import re
+
+ from cmind import __version__ as current_cm_version
+
+ self_module = i['self_module']
+
+ if isinstance(i.get('docker', None), dict):
+ # Grigori started cleaning and refactoring this code on 20240929
+ #
+ # 1. use --docker dictionary instead of --docker_{keys}
+
+ if utils.compare_versions(current_cm_version, '2.3.8.1') >= 0:
+ docker_params = utils.convert_dictionary(i['docker'], 'docker')
+ i.update(docker_params)
+ del (i['docker'])
+
+ quiet = i.get('quiet', False)
+
+ detached = i.get('docker_detached', '')
+ if detached == '':
+ detached = i.get('docker_dt', '')
+ if detached == '':
+ detached = 'no'
+
+ interactive = i.get('docker_interactive', '')
+ if interactive == '':
+ interactive = i.get('docker_it', '')
+
+ verbose = i.get('v', False)
+ show_time = i.get('show_time', False)
+
+ # Check simplified CMD: cm docker script "python app image-classification onnx"
+ # If artifact has spaces, treat them as tags!
+ self_module.cmind.access(
+ {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i})
+
+ # CAREFUL -> artifacts and parsed_artifacts are not supported in input
+ # (and should not be?)
+ if 'artifacts' in i:
+ del (i['artifacts'])
+ if 'parsed_artifacts' in i:
+ del (i['parsed_artifacts'])
+
+ # Prepare "clean" input to replicate command
+ r = self_module.cmind.access({'action': 'prune_input',
+ 'automation': 'utils',
+ 'input': i,
+ 'extra_keys_starts_with': ['docker_']})
+ i_run_cmd_arc = r['new_input']
+
+ env = i.get('env', {})
+
+ noregenerate_docker_file = i.get('docker_noregenerate', False)
+ norecreate_docker_image = i.get('docker_norecreate', True)
+
+ if i.get('docker_skip_build', False):
+ noregenerate_docker_file = True
+ norecreate_docker_image = True
+ env['CM_DOCKER_SKIP_BUILD'] = 'yes'
+
+ # Check available configurations
+ docker_cfg = i.get('docker_cfg', '')
+ docker_cfg_uid = i.get('docker_cfg_uid', '')
+
+ if docker_cfg != '' or docker_cfg_uid != '':
+ # Check if docker_cfg is turned on but not selected
+ if isinstance(docker_cfg, bool) or str(
+ docker_cfg).lower() in ['true', 'yes']:
+ docker_cfg = ''
+
+ r = self_module.cmind.access({'action': 'select_cfg',
+ 'automation': 'utils,dc2743f8450541e3',
+ 'tags': 'basic,docker,configurations',
+ 'title': 'docker',
+ 'alias': docker_cfg,
+ 'uid': docker_cfg_uid})
+ if r['return'] > 0:
+ if r['return'] == 16:
+ return {'return': 1, 'error': 'Docker configuration {} was not found'.format(
+ docker_cfg)}
+ return r
+
+ selection = r['selection']
+
+ docker_input_update = selection['meta']['input']
+
+ i.update(docker_input_update)
+
+ ##########################################################################
+ # Run dockerfile
+ if not noregenerate_docker_file:
+ r = utils.call_internal_module(
+ self_module, __file__, 'module_misc', 'dockerfile', i)
+ if r['return'] > 0:
+ return r
+
+ # Save current directory
+ cur_dir = os.getcwd()
+
+ console = i.get('out') == 'con'
+
+ # Search for script(s)
+ r = aux_search({'self_module': self_module, 'input': i})
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ if len(lst) == 0:
+ return {'return': 1, 'error': 'no scripts were found'}
+
+ env['CM_RUN_STATE_DOCKER'] = False
+ script_automation = i['self_module']
+ state = i.get('state', {})
+ const = i.get('const', {})
+ const_state = i.get('const_state', {})
+
+ tags_split = i.get('tags', '').split(",")
+ variation_tags = [t[1:] for t in tags_split if t.startswith("_")]
+
+ docker_cache = i.get('docker_cache', "yes")
+ if docker_cache in ["no", False, "False"]:
+ if 'CM_DOCKER_CACHE' not in env:
+ env['CM_DOCKER_CACHE'] = docker_cache
+
+ image_repo = i.get('docker_image_repo', '')
+ if image_repo == '':
+ image_repo = 'local'
+
+ # Host system needs to have docker
+ r = self_module.cmind.access({'action': 'run',
+ 'automation': 'script',
+ 'tags': "get,docker"})
+ if r['return'] > 0:
+ return r
+
+ for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')):
+
+ meta = artifact.meta
+
+ if i.get('help', False):
+ return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', {
+ 'meta': meta, 'path': artifact.path})
+
+ script_path = artifact.path
+
+ tags = meta.get("tags", [])
+ tag_string = ",".join(tags)
+
+ script_alias = meta.get('alias', '')
+ script_uid = meta.get('uid', '')
+
+ mounts = copy.deepcopy(i.get('docker_mounts', []))
+
+ '''run_config_path = os.path.join(script_path,'run_config.yml')
+ if not os.path.exists(run_config_path):
+ print("No run_config.yml file present in {}".format(script_path))
+ continue
+ import yaml
+ with open(run_config_path, 'r') as run_config_file:
+ run_config = yaml.safe_load(run_config_file)
+ '''
+
+ variations = meta.get('variations', {})
+ docker_settings = meta.get('docker', {})
+ state['docker'] = docker_settings
+ # Todo: Support state, const and add_deps_recursive
+ run_state = {'deps': [], 'fake_deps': [], 'parent': None}
+ run_state['script_id'] = script_alias + "," + script_uid
+ run_state['script_variation_tags'] = variation_tags
+ add_deps_recursive = i.get('add_deps_recursive', {})
+
+ r = script_automation.update_state_from_meta(
+ meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps=[],
+ post_deps=[],
+ prehook_deps=[],
+ posthook_deps=[],
+ new_env_keys=[],
+ new_state_keys=[],
+ run_state=run_state,
+ i=i)
+ if r['return'] > 0:
+ return r
+
+ r = script_automation._update_state_from_variations(
+ i,
+ meta,
+ variation_tags,
+ variations,
+ env,
+ state,
+ const,
+ const_state,
+ deps=[],
+ post_deps=[],
+ prehook_deps=[],
+ posthook_deps=[],
+ new_env_keys_from_meta=[],
+ new_state_keys_from_meta=[],
+ add_deps_recursive=add_deps_recursive,
+ run_state=run_state,
+ recursion_spaces='',
+ verbose=False)
+ if r['return'] > 0:
+ return r
+
+ docker_settings = state['docker']
+
+ if not docker_settings.get('run', True) and not i.get(
+ 'docker_run_override', False):
+ print("docker.run set to False in _cm.json")
+ continue
+ '''
+ if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'):
+ print("Run config is not configured for docker run in {}".format(run_config_path))
+ continue
+ '''
+
+ # Check if need to update/map/mount inputs and env
+ r = process_inputs({'run_cmd_arc': i_run_cmd_arc,
+ 'docker_settings': docker_settings,
+ 'mounts': mounts})
+ if r['return'] > 0:
+ return r
+
+ i_run_cmd = r['run_cmd']
+
+ # Check if need to mount home directory
+ current_path_target = '/cm-mount/current'
+ if docker_settings.get('mount_current_dir', '') == 'yes':
+ update_path_for_docker(
+ '.', mounts, force_path_target=current_path_target)
+
+ _os = i.get('docker_os', docker_settings.get('os', 'ubuntu'))
+ version = i.get(
+ 'docker_os_version',
+ docker_settings.get(
+ 'os_version',
+ '22.04'))
+
+            # Merge build-time deps (the 'build_deps' key is an assumption)
+            # with run deps; reading 'deps' twice would duplicate every dep
+            build_deps = docker_settings.get('build_deps', [])
+            deps = docker_settings.get('deps', [])
+            deps = build_deps + deps
+ if deps:
+ r = script_automation._run_deps(
+ deps,
+ [],
+ env,
+ {},
+ {},
+ {},
+ {},
+ '',
+ [],
+ '',
+ False,
+ '',
+ verbose,
+ show_time,
+ ' ',
+ run_state)
+ if r['return'] > 0:
+ return r
+
+ # For updating meta from update_meta_if_env
+ r = script_automation.update_state_from_meta(
+ meta,
+ env,
+ state,
+ const,
+ const_state,
+ deps=[],
+ post_deps=[],
+ prehook_deps=[],
+ posthook_deps=[],
+ new_env_keys=[],
+ new_state_keys=[],
+ run_state=run_state,
+ i=i)
+ if r['return'] > 0:
+ return r
+
+ docker_settings = state['docker']
+
+ for key in docker_settings.get('mounts', []):
+ mounts.append(key)
+
+ # Updating environment variables from CM input based on input_mapping
+ # from meta
+ input_mapping = meta.get('input_mapping', {})
+
+ for c_input in input_mapping:
+ if c_input in i:
+ env[input_mapping[c_input]] = i[c_input]
+ # del(i[c_input])
+
+ # Updating environment variables from CM input based on
+ # docker_input_mapping from meta
+
+ docker_input_mapping = docker_settings.get('docker_input_mapping', {})
+
+ for c_input in docker_input_mapping:
+ if c_input in i:
+ env[docker_input_mapping[c_input]] = i[c_input]
+ # del(i[c_input])
+
+ # env keys corresponding to container mounts are explicitly passed to
+ # the container run cmd
+ container_env_string = ''
+ for index in range(len(mounts)):
+ mount = mounts[index]
+            # Windows mounts may contain a drive colon (e.g. C:\...), so search for the separator from the right
+ j = mount.rfind(':')
+ if j > 0:
+ mount_parts = [mount[:j], mount[j + 1:]]
+ else:
+ return {
+ 'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)}
+
+#            mount_parts = mount.split(":")
+#            if len(mount_parts) != 2:
+#                return {'return': 1, 'error': 'Invalid mount specified in docker settings'}
+
+ host_mount = mount_parts[0]
+ new_host_mount = host_mount
+ container_mount = mount_parts[1]
+ new_container_mount = container_mount
+
+ tmp_values = re.findall(r'\${{ (.*?) }}', str(host_mount))
+ skip = False
+ host_env_key = None
+ if tmp_values:
+ for tmp_value in tmp_values:
+ if tmp_value in env:
+ host_env_key = tmp_value
+ new_host_mount = get_host_path(env[tmp_value])
+ else: # we skip those mounts
+ mounts[index] = None
+ skip = True
+ break
+
+ tmp_values = re.findall(r'\${{ (.*?) }}', str(container_mount))
+ if tmp_values:
+ for tmp_value in tmp_values:
+ container_env_key = tmp_value
+ if tmp_value in env:
+ new_container_mount, new_container_mount_env = get_container_path(
+ env[tmp_value])
+ container_env_key = new_container_mount_env
+ # container_env_string += " --env.{}={} ".format(tmp_value, new_container_mount_env)
+ else: # we skip those mounts
+ mounts[index] = None
+ skip = True
+ break
+ else:
+ container_env_key = str(container_mount)
+
+ if skip:
+ continue
+ mounts[index] = new_host_mount + ":" + new_container_mount
+ if host_env_key:
+ container_env_string += " --env.{}={} ".format(
+ host_env_key, container_env_key)
+
+ for v in docker_input_mapping:
+ if docker_input_mapping[v] == host_env_key:
+ i[v] = container_env_key
+ i_run_cmd[v] = container_env_key
+
+ mounts = list(filter(lambda item: item is not None, mounts))
+
+ mount_string = "" if len(mounts) == 0 else ",".join(mounts)
+
+        # Check for proxy settings and pass them on to Docker
+ proxy_keys = [
+ "ftp_proxy",
+ "FTP_PROXY",
+ "http_proxy",
+ "HTTP_PROXY",
+ "https_proxy",
+ "HTTPS_PROXY",
+ "no_proxy",
+ "NO_PROXY",
+ "socks_proxy",
+ "SOCKS_PROXY",
+ "GH_TOKEN"]
+
+ if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []:
+ env['+ CM_DOCKER_BUILD_ARGS'] = []
+
+ for key in proxy_keys:
+ if os.environ.get(key, '') != '':
+ value = os.environ[key]
+ container_env_string += " --env.{}={} ".format(key, value)
+ env['+ CM_DOCKER_BUILD_ARGS'].append(
+ "{}={}".format(key, value))
+
+ docker_use_host_group_id = i.get(
+ 'docker_use_host_group_id',
+ docker_settings.get('use_host_group_id'))
+ if str(docker_use_host_group_id).lower() not in [
+ 'false', 'no', '0'] and os.name != 'nt':
+ env['+ CM_DOCKER_BUILD_ARGS'].append(
+ "{}={}".format('GID', '\\" $(id -g $USER) \\"'))
+
+ docker_use_host_user_id = i.get(
+ 'docker_use_host_user_id',
+ docker_settings.get('use_host_user_id'))
+ if str(docker_use_host_user_id).lower() not in [
+ 'false', 'no', '0'] and os.name != 'nt':
+ env['+ CM_DOCKER_BUILD_ARGS'].append(
+ "{}={}".format('UID', '\\" $(id -u $USER) \\"'))
+
+ docker_base_image = i.get(
+ 'docker_base_image',
+ docker_settings.get('base_image'))
+ docker_os = i.get('docker_os', docker_settings.get('os', 'ubuntu'))
+ docker_os_version = i.get(
+ 'docker_os_version', docker_settings.get(
+ 'os_version', '22.04'))
+ image_tag_extra = i.get(
+ 'docker_image_tag_extra',
+ docker_settings.get(
+ 'image_tag_extra',
+ '-latest'))
+
+ if not docker_base_image:
+ dockerfilename_suffix = docker_os + '_' + docker_os_version
+ else:
+ if os.name == 'nt':
+ dockerfilename_suffix = docker_base_image.replace(
+ '/', '-').replace(':', '-')
+ else:
+ dockerfilename_suffix = docker_base_image.split("/")
+ dockerfilename_suffix = dockerfilename_suffix[len(
+ dockerfilename_suffix) - 1]
+
+ cm_repo = i.get(
+ 'docker_cm_repo',
+ docker_settings.get(
+ 'cm_repo',
+ 'mlcommons@cm4mlops'))
+
+ docker_path = i.get('docker_path', '').strip()
+ if docker_path == '':
+ docker_path = script_path
+
+ dockerfile_path = os.path.join(
+ docker_path,
+ 'dockerfiles',
+ dockerfilename_suffix +
+ '.Dockerfile')
+
+ # Skips docker run cmd and gives an interactive shell to the user
+ docker_skip_run_cmd = i.get(
+ 'docker_skip_run_cmd', docker_settings.get(
+ 'skip_run_cmd', False))
+
+ docker_pre_run_cmds = i.get(
+ 'docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', [])
+
+ docker_run_cmd_prefix = i.get(
+ 'docker_run_cmd_prefix', docker_settings.get(
+ 'run_cmd_prefix', ''))
+
+ all_gpus = i.get('docker_all_gpus', docker_settings.get('all_gpus'))
+
+ num_gpus = i.get('docker_num_gpus', docker_settings.get('num_gpus'))
+
+ device = i.get('docker_device', docker_settings.get('device'))
+
+ image_name = i.get(
+ 'docker_image_name',
+ docker_settings.get(
+ 'image_name',
+ ''))
+
+ r = check_gh_token(i, docker_settings, quiet)
+ if r['return'] > 0:
+ return r
+ gh_token = r['gh_token']
+
+ port_maps = i.get(
+ 'docker_port_maps',
+ docker_settings.get(
+ 'port_maps',
+ []))
+
+ shm_size = i.get(
+ 'docker_shm_size',
+ docker_settings.get(
+ 'shm_size',
+ ''))
+
+ pass_user_id = i.get(
+ 'docker_pass_user_id',
+ docker_settings.get(
+ 'pass_user_id',
+ ''))
+ pass_user_group = i.get(
+ 'docker_pass_user_group',
+ docker_settings.get(
+ 'pass_user_group',
+ ''))
+
+ extra_run_args = i.get(
+ 'docker_extra_run_args',
+ docker_settings.get(
+ 'extra_run_args',
+ ''))
+
+ if detached == '':
+ detached = docker_settings.get('detached', '')
+
+ if str(docker_skip_run_cmd).lower() in ['true', '1', 'yes']:
+ interactive = 'yes'
+ elif interactive == '':
+ interactive = docker_settings.get('interactive', '')
+
+
+# # Regenerate run_cmd
+# if i.get('cmd'):
+# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') )
+# elif i.get('artifact'):
+# run_cmd = "cm run script "+i['artifact']
+# elif i.get('tags'):
+# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\""
+# else:
+# run_cmd = ""
+
+ r = regenerate_script_cmd({'script_uid': script_uid,
+ 'script_alias': script_alias,
+ 'tags': tags,
+ 'run_cmd': i_run_cmd,
+ 'docker_settings': docker_settings,
+ 'docker_run_cmd_prefix': i.get('docker_run_cmd_prefix', '')})
+ if r['return'] > 0:
+ return r
+ run_cmd = r['run_cmd_string'] + ' ' + \
+ container_env_string + ' --docker_run_deps '
+
+ env['CM_RUN_STATE_DOCKER'] = True
+
+ if docker_settings.get('mount_current_dir', '') == 'yes':
+ run_cmd = 'cd ' + current_path_target + ' && ' + run_cmd
+
+ final_run_cmd = run_cmd if docker_skip_run_cmd not in [
+ 'yes', True, 'True'] else 'cm version'
+
+ print('')
+ print('CM command line regenerated to be used inside Docker:')
+ print('')
+ print(final_run_cmd)
+ print('')
+
+ docker_recreate_image = 'yes' if not norecreate_docker_image else 'no'
+
+ if i.get('docker_push_image', '') in ['True', True, 'yes']:
+ env['CM_DOCKER_PUSH_IMAGE'] = 'yes'
+
+ cm_docker_input = {'action': 'run',
+ 'automation': 'script',
+ 'tags': 'run,docker,container',
+ 'recreate': docker_recreate_image,
+ 'docker_base_image': docker_base_image,
+ 'docker_os': docker_os,
+ 'docker_os_version': docker_os_version,
+ 'cm_repo': cm_repo,
+ 'env': env,
+ 'image_repo': image_repo,
+ 'interactive': interactive,
+ 'mounts': mounts,
+ 'image_name': image_name,
+ # 'image_tag': script_alias,
+ 'image_tag_extra': image_tag_extra,
+ 'detached': detached,
+ 'script_tags': f"""{i.get('tags')}""",
+ 'run_cmd': final_run_cmd,
+ 'v': i.get('v', False),
+ 'quiet': True,
+ 'pre_run_cmds': docker_pre_run_cmds,
+ 'real_run': True,
+ 'add_deps_recursive': {
+ 'build-docker-image': {
+ 'dockerfile': dockerfile_path
+ }
+ }
+ }
+
+ if all_gpus:
+ cm_docker_input['all_gpus'] = True
+
+ if num_gpus:
+ cm_docker_input['num_gpus'] = str(num_gpus)
+
+ if device:
+ cm_docker_input['device'] = device
+
+ if gh_token != '':
+ cm_docker_input['gh_token'] = gh_token
+
+ if port_maps:
+ cm_docker_input['port_maps'] = port_maps
+
+ if shm_size != '':
+ cm_docker_input['shm_size'] = shm_size
+
+ if pass_user_id != '':
+ cm_docker_input['pass_user_id'] = pass_user_id
+
+ if pass_user_group != '':
+ cm_docker_input['pass_user_group'] = pass_user_group
+
+ if extra_run_args != '':
+ cm_docker_input['extra_run_args'] = extra_run_args
+
+ if i.get('docker_save_script', ''):
+ cm_docker_input['save_script'] = i['docker_save_script']
+
+ print('')
+
+ r = self_module.cmind.access(cm_docker_input)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+############################################################
+
+
+def check_gh_token(i, docker_settings, quiet):
+ gh_token = i.get('docker_gh_token', '')
+
+ if docker_settings.get('gh_token_required', False) and gh_token == '':
+ rx = {
+ 'return': 1,
+ 'error': 'GH token is required but not provided. Use --docker_gh_token to set it'}
+
+ if quiet:
+ return rx
+
+ print('')
+ gh_token = input(
+ 'Enter GitHub token to access private CM repositories required for this CM script: ')
+
+ if gh_token == '':
+ return rx
+
+ return {'return': 0, 'gh_token': gh_token}
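+
+############################################################
+# Illustrative sketch (an assumption, not called by the code above): a
+# standalone version of the '${{ ENV }}' placeholder resolution that docker()
+# applies to 'host:container' mount strings. The real code additionally maps
+# paths via get_host_path()/get_container_path(); this sketch omits that.
+def _example_resolve_mount(mount, env):
+    import re
+
+    # Search from the right so Windows drive colons (C:\...) survive
+    j = mount.rfind(':')
+    if j < 0:
+        return None
+    host, container = mount[:j], mount[j + 1:]
+
+    for key in re.findall(r'\${{ (.*?) }}', mount):
+        if key not in env:
+            return None  # unresolved placeholder -> docker() skips the mount
+        host = host.replace('${{ ' + key + ' }}', env[key])
+        container = container.replace('${{ ' + key + ' }}', env[key])
+
+    return host + ':' + container
+
+# Example: _example_resolve_mount('${{ CM_CACHE }}:/cache',
+# {'CM_CACHE': '/home/user/cache'}) -> '/home/user/cache:/cache'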
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md
new file mode 100644
index 0000000000..05e53dc1a0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md
@@ -0,0 +1,2 @@
+# CM script to run and reproduce experiments
+
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml
new file mode 100644
index 0000000000..8019b3647e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml
@@ -0,0 +1,38 @@
+cache: false
+
+deps:
+ # Detect host OS features
+ - tags: detect,os
+
+ # Detect/install python
+ - tags: get,python
+ names:
+ - python
+ - python3
+
+script_name: run
+
+input_mapping:
+ experiment: CM_EXPERIMENT
+
+default_env:
+ CM_EXPERIMENT: '1'
+
+variations:
+ install_deps:
+ script_name: install_deps
+
+ run:
+ script_name: run
+
+ reproduce:
+ script_name: reproduce
+
+ plot:
+ script_name: plot
+
+ analyze:
+ script_name: analyze
+
+ validate:
+ script_name: validate
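+
+# Illustrative usage (an assumption, not part of the generated template):
+# `cm run script --tags=<your script tags>,_install_deps` runs install_deps.sh
+# (or install_deps.bat on Windows), and `--experiment=2` sets CM_EXPERIMENT
+# via 'input_mapping' above.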
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat
new file mode 100644
index 0000000000..7e786771ae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+rem echo.
+rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh
new file mode 100644
index 0000000000..630c3db3dd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+#echo ""
+#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py
new file mode 100644
index 0000000000..273999d460
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py
@@ -0,0 +1,24 @@
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ meta = i['meta']
+
+ automation = i['automation']
+
+ quiet = (env.get('CM_QUIET', False) == 'yes')
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ env = i['env']
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat
new file mode 100644
index 0000000000..47f7e7ce26
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat
@@ -0,0 +1,18 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
+
+ echo.
+ echo Installing requirements.txt ...
+ echo.
+
+ %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh
new file mode 100644
index 0000000000..cb7c44c2bc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
+ echo ""
+ echo "Installing requirements.txt ..."
+ echo ""
+
+ ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ test $? -eq 0 || exit 1
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py
new file mode 100644
index 0000000000..caa499bf08
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py
@@ -0,0 +1,10 @@
+import os
+
+if __name__ == "__main__":
+
+ print('')
+ print('Main script:')
+ print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+ print('')
+
+ exit(0)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat
new file mode 100644
index 0000000000..7e786771ae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+rem echo.
+rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh
new file mode 100644
index 0000000000..630c3db3dd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+#echo ""
+#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat
new file mode 100644
index 0000000000..7e786771ae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+rem echo.
+rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh
new file mode 100644
index 0000000000..630c3db3dd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+#echo ""
+#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat
new file mode 100644
index 0000000000..6c1274ce64
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh
new file mode 100644
index 0000000000..2150b45dcd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+echo ""
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat
new file mode 100644
index 0000000000..7e786771ae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+rem echo.
+rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh
new file mode 100644
index 0000000000..630c3db3dd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+#echo ""
+#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+#test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md
new file mode 100644
index 0000000000..582991f6d2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md
@@ -0,0 +1 @@
+# CM script
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml
new file mode 100644
index 0000000000..adbb8d4e7c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml
@@ -0,0 +1,23 @@
+cache: false
+
+deps:
+ # Detect host OS features
+ - tags: detect,os
+
+ # Detect/install python
+ - tags: get,python
+ names:
+ - python
+ - python3
+
+input_mapping:
+ var1: CM_VAR1
+ req: PIP_REQUIREMENTS
+
+default_env:
+ CM_VAR1: 'something'
+
+variations:
+ req:
+ env:
+ PIP_REQUIREMENTS: True
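+
+# Illustrative usage (an assumption): `cm run script --tags=<your script tags>,_req --var1=hello`
+# exports CM_VAR1=hello via 'input_mapping' above, while the '_req' variation
+# sets PIP_REQUIREMENTS so run.sh installs requirements.txt first.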
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py
new file mode 100644
index 0000000000..625b643d44
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py
@@ -0,0 +1,32 @@
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+ print('')
+ print('Preprocessing ...')
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ meta = i['meta']
+
+ automation = i['automation']
+
+ quiet = (env.get('CM_QUIET', False) == 'yes')
+
+ print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', '')))
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ print('')
+ print('Postprocessing ...')
+
+ env = i['env']
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py
new file mode 100644
index 0000000000..e3302f36fa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py
@@ -0,0 +1,10 @@
+import os
+
+if __name__ == "__main__":
+
+ print('')
+ print('Main script:')
+ print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', '')))
+ print('')
+
+ exit(0)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/requirements.txt b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/requirements.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat
new file mode 100644
index 0000000000..f9e1264bc8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat
@@ -0,0 +1,25 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS%
+echo ENV CM_VAR1: %CM_VAR1%
+
+if "%PIP_REQUIREMENTS%" == "True" (
+ if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
+
+ echo.
+ echo Installing requirements.txt ...
+ echo.
+
+ %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+ )
+)
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh
new file mode 100644
index 0000000000..a1a6aec2e2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}"
+echo "ENV CM_VAR1: ${CM_VAR1}"
+
+if [ "${PIP_REQUIREMENTS}" == "True" ]; then
+ if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
+ echo ""
+ echo "Installing requirements.txt ..."
+ echo ""
+
+ ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ test $? -eq 0 || exit 1
+ fi
+fi
+
+echo ""
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md
new file mode 100644
index 0000000000..582991f6d2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md
@@ -0,0 +1 @@
+# CM script
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml
new file mode 100644
index 0000000000..eaff95e47d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml
@@ -0,0 +1,42 @@
+cache: false
+
+deps:
+ # Detect host OS features
+ - tags: detect,os
+
+ # Detect/install python
+ - tags: get,python
+ names:
+ - python
+ - python3
+
+ - tags: get,generic-python-lib,_torch
+ skip_if_env:
+ USE_CUDA:
+ - yes
+
+ - tags: get,generic-python-lib,_torch_cuda
+ enable_if_env:
+ USE_CUDA:
+ - yes
+
+ - tags: get,generic-python-lib,_package.numpy
+
+
+input_mapping:
+ var1: CM_VAR1
+ req: PIP_REQUIREMENTS
+
+default_env:
+ CM_VAR1: 'something'
+
+variations:
+ req:
+ env:
+ PIP_REQUIREMENTS: True
+
+ cuda:
+ env:
+ USE_CUDA: yes
+ deps:
+ - tags: get,cuda
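+
+# Illustrative note (an assumption): selecting the '_cuda' variation sets
+# USE_CUDA, which switches the torch dependency above from '_torch' to
+# '_torch_cuda' via skip_if_env/enable_if_env and pulls in 'get,cuda'.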
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py
new file mode 100644
index 0000000000..625b643d44
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py
@@ -0,0 +1,32 @@
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+ print('')
+ print('Preprocessing ...')
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ meta = i['meta']
+
+ automation = i['automation']
+
+ quiet = (env.get('CM_QUIET', False) == 'yes')
+
+ print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', '')))
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ print('')
+ print('Postprocessing ...')
+
+ env = i['env']
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py
new file mode 100644
index 0000000000..217aed3b9d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py
@@ -0,0 +1,15 @@
+import os
+
+import torch
+
+if __name__ == "__main__":
+
+ print('')
+ print('Main script:')
+ print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', '')))
+ print('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA', '')))
+ print('')
+ print('PyTorch version: {}'.format(torch.__version__))
+ print('')
+
+ exit(0)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/requirements.txt b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/requirements.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat
new file mode 100644
index 0000000000..f9e1264bc8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat
@@ -0,0 +1,25 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS%
+echo ENV CM_VAR1: %CM_VAR1%
+
+if "%PIP_REQUIREMENTS%" == "True" (
+ if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" (
+
+ echo.
+ echo Installing requirements.txt ...
+ echo.
+
+ %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+ IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+ )
+)
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh
new file mode 100644
index 0000000000..a1a6aec2e2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}"
+echo "ENV CM_VAR1: ${CM_VAR1}"
+
+if [ "${PIP_REQUIREMENTS}" == "True" ]; then
+ if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then
+ echo ""
+ echo "Installing requirements.txt ..."
+ echo ""
+
+ ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+ test $? -eq 0 || exit 1
+ fi
+fi
+
+echo ""
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md
new file mode 100644
index 0000000000..582991f6d2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md
@@ -0,0 +1 @@
+# CM script
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py
new file mode 100644
index 0000000000..273999d460
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py
@@ -0,0 +1,24 @@
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ meta = i['meta']
+
+ automation = i['automation']
+
+ quiet = (env.get('CM_QUIET', False) == 'yes')
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ env = i['env']
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat
new file mode 100644
index 0000000000..648302ca71
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat
@@ -0,0 +1 @@
+rem native script
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh
new file mode 100644
index 0000000000..4c23c380ea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+echo "Running: "
+echo "${CM_RUN_CMD}"
+echo ""
+
+if [[ ${CM_FAKE_RUN} != "yes" ]]; then
+ eval "${CM_RUN_CMD}"
+ test $? -eq 0 || exit 1
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md b/cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md
new file mode 100644
index 0000000000..198a500f1b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md
@@ -0,0 +1,52 @@
+[ [Back to index](README.md) ]
+
+
+
+This is an automatically generated list of portable and reusable automation recipes (CM scripts)
+with a [human-friendly interface (CM)](https://github.com/mlcommons/ck)
+to run a growing number of ad-hoc MLPerf, MLOps, and DevOps scripts
+from [MLCommons projects](https://github.com/mlcommons/cm4mlops/tree/main/script)
+and [research papers](https://www.youtube.com/watch?v=7zpeIVwICa4)
+in a unified way on any operating system with any software and hardware
+natively or inside containers.
+
+Click on any automation recipe below to learn how to run and reuse it
+via the CM command line, Python API or GUI.
+
+CM scripts can easily be chained together into automation workflows using the `deps` and `tags` keys,
+while CM automatically updates all environment variables and paths
+for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml),
+as illustrated by the sketch below.
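+
+*For example (a minimal illustrative sketch, assuming the `cmind` package is
+installed and this repository is pulled), the same scripts can be chained and
+run from Python via the unified `access` API:*
+
+```python
+import cmind
+
+# Run a script by tags; CM resolves its 'deps' (e.g. detect,os and
+# get,python) before executing the script itself.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'python,app,image-classification,onnx',
+                  'out': 'con'})
+if r['return'] > 0:
+    print(r['error'])
+```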
+
+
+*Note that CM is a community project developed and extended by [MLCommons members and individual contributors](../CONTRIBUTING.md) -
+ you can find the source code of CM scripts maintained by MLCommons [here](../cm-mlops/script).
+ Please join the [Discord server](https://discord.gg/JjWNWXKxwT) to participate in collaborative development or provide feedback.*
+
+
+# License
+
+[Apache 2.0](LICENSE.md)
+
+
+# Copyright
+
+2022-2024 [MLCommons](https://mlcommons.org)
+
+
+
+
+
+# List of CM scripts by categories
+
+{{CM_TOC_CATEGORIES}}
+
+{{CM_TOC2}}
+
+# List of all sorted CM scripts
+
+{{CM_TOC}}
+
+
+{{CM_MAIN}}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md
new file mode 100644
index 0000000000..2a313520bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/README.md b/cmx4mlops/cmx4mlops/repo/automation/utils/README.md
new file mode 100644
index 0000000000..9a844c6566
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/utils/README.md
@@ -0,0 +1,387 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+ * CM CLI: ```cm test utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15))
+ * CM CLI with UID: ```cm test utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'test',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### get_host_os_info
+
+ * CM CLI: ```cm get_host_os_info utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54))
+ * CM CLI with UID: ```cm get_host_os_info utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'get_host_os_info',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### download_file
+
+ * CM CLI: ```cm download_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156))
+ * CM CLI with UID: ```cm download_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'download_file',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### unzip_file
+
+ * CM CLI: ```cm unzip_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265))
+ * CM CLI with UID: ```cm unzip_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'unzip_file',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### compare_versions
+
+ * CM CLI: ```cm compare_versions utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343))
+ * CM CLI with UID: ```cm compare_versions utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'compare_versions',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### json2yaml
+
+ * CM CLI: ```cm json2yaml utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391))
+ * CM CLI with UID: ```cm json2yaml utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'json2yaml',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### yaml2json
+
+ * CM CLI: ```cm yaml2json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429))
+ * CM CLI with UID: ```cm yaml2json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'yaml2json',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### sort_json
+
+ * CM CLI: ```cm sort_json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467))
+ * CM CLI with UID: ```cm sort_json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'sort_json',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### dos2unix
+
+ * CM CLI: ```cm dos2unix utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504))
+ * CM CLI with UID: ```cm dos2unix utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'dos2unix',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### replace_string_in_file
+
+ * CM CLI: ```cm replace_string_in_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541))
+ * CM CLI with UID: ```cm replace_string_in_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'replace_string_in_file',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### create_toc_from_md
+
+ * CM CLI: ```cm create_toc_from_md utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591))
+ * CM CLI with UID: ```cm create_toc_from_md utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'create_toc_from_md',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### copy_to_clipboard
+
+ * CM CLI: ```cm copy_to_clipboard utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659))
+ * CM CLI with UID: ```cm copy_to_clipboard utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'copy_to_clipboard',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### list_files_recursively
+
+ * CM CLI: ```cm list_files_recursively utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737))
+ * CM CLI with UID: ```cm list_files_recursively utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'list_files_recursively',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### generate_secret
+
+ * CM CLI: ```cm generate_secret utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770))
+ * CM CLI with UID: ```cm generate_secret utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'generate_secret',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### detect_tags_in_artifact
+
+ * CM CLI: ```cm detect_tags_in_artifact utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793))
+ * CM CLI with UID: ```cm detect_tags_in_artifact utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'detect_tags_in_artifact',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### prune_input
+
+ * CM CLI: ```cm prune_input utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822))
+ * CM CLI with UID: ```cm prune_input utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'prune_input',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### uid
+
+ * CM CLI: ```cm uid utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864))
+ * CM CLI with UID: ```cm uid utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'uid',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### system
+
+ * CM CLI: ```cm system utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891))
+ * CM CLI with UID: ```cm system utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'system',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+#### load_cfg
+
+ * CM CLI: ```cm load_cfg utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969))
+ * CM CLI with UID: ```cm load_cfg utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969))
+ * CM Python API:
+ ```python
+ import cmind
+
+ r=cmind.access({
+ 'action':'load_cfg',
+ 'automation':'utils,dc2743f8450541e3',
+ 'out':'con'
+ ```
+ [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)
+ ```python
+ })
+ if r['return']>0:
+ print(r['error'])
+ ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json
new file mode 100644
index 0000000000..f2dc9c5b66
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json
@@ -0,0 +1,12 @@
+{
+ "alias": "utils",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "desc": "Accessing various CM utils",
+ "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)",
+ "sort": 800,
+ "tags": [
+ "automation"
+ ],
+ "uid": "dc2743f8450541e3"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/module.py b/cmx4mlops/cmx4mlops/repo/automation/utils/module.py
new file mode 100644
index 0000000000..90b5c5c290
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/utils/module.py
@@ -0,0 +1,1121 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print(json.dumps(i, indent=2))
+
+ return {'return': 0}
+
+ ##########################################################################
+ def get_host_os_info(self, i):
+ """
+        Get the host platform name (windows, linux or darwin) and OS bits
+
+ Args:
+ (CM input dict):
+
+ (bits) (str): force host platform bits
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * info (dict):
+ * platform (str): "windows", "linux" or "darwin"
+ * bat_ext (str): ".bat" or ".sh"
+ * bits (str): 32 or 64 bits
+            * python_bits (str): python bits
+
+ """
+
+ import os
+ import platform
+ import struct
+
+ info = {}
+
+ pbits = str(8 * struct.calcsize("P"))
+
+ if platform.system().lower().startswith('win'):
+ platform = 'windows'
+ info['bat_ext'] = '.bat'
+ info['set_env'] = 'set ${key}=${value}'
+ info['env_separator'] = ';'
+ info['env_var'] = '%env_var%'
+ info['bat_rem'] = 'rem ${rem}'
+ info['run_local_bat'] = 'call ${bat_file}'
+ info['run_local_bat_from_python'] = 'call ${bat_file}'
+ info['run_bat'] = 'call ${bat_file}'
+ info['start_script'] = ['@echo off', '']
+ info['env'] = {
+ "CM_WINDOWS": "yes"
+ }
+ else:
+ if platform.system().lower().startswith('darwin'):
+ platform = 'darwin'
+ else:
+ platform = 'linux'
+
+ info['bat_ext'] = '.sh'
+ info['set_env'] = 'export ${key}="${value}"'
+ info['env_separator'] = ':'
+ info['env_var'] = '${env_var}'
+ info['set_exec_file'] = 'chmod 755 "${file_name}"'
+ info['bat_rem'] = '# ${rem}'
+ info['run_local_bat'] = '. ./${bat_file}'
+ info['run_local_bat_from_python'] = 'bash -c ". ./${bat_file}"'
+ info['run_bat'] = '. ${bat_file}'
+ info['start_script'] = ['#!/bin/bash', '']
+ info['env'] = {}
+
+ info['platform'] = platform
+
+ obits = i.get('bits', '')
+ if obits == '':
+ obits = '32'
+ if platform == 'windows':
+                # Try a fast way to detect bits on Windows
+ if os.environ.get('ProgramW6432', '') != '' or os.environ.get(
+ 'ProgramFiles(x86)', '') != '': # pragma: no cover
+ obits = '64'
+ else:
+                # On Linux, first try 'getconf LONG_BIT'; if that fails, fall
+                # back to the Python bits
+
+ obits = pbits
+
+ r = utils.gen_tmp_file({})
+ if r['return'] > 0:
+ return r
+
+ fn = r['file_name']
+
+ cmd = 'getconf LONG_BIT > ' + fn
+ rx = os.system(cmd)
+
+ if rx == 0:
+ r = utils.load_txt(file_name=fn, remove_after_read=True)
+
+ if r['return'] == 0:
+ s = r['string'].strip()
+ if len(s) > 0 and len(s) < 4:
+ obits = s
+ else:
+ if os.path.isfile(fn):
+ os.remove(fn)
+
+ info['bits'] = obits
+ info['python_bits'] = pbits
+
+ return {'return': 0, 'info': info}
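+
+    # Illustrative usage (an assumption; see this automation's README):
+    #
+    #   import cmind
+    #   r = cmind.access({'action': 'get_host_os_info',
+    #                     'automation': 'utils,dc2743f8450541e3'})
+    #   if r['return'] == 0:
+    #       print(r['info']['platform'], r['info']['bits'])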
+
+ ##########################################################################
+ def download_file(self, i):
+ """
+ Download file using requests
+
+ Args:
+ (CM input dict):
+
+ url (str): URL with file
+ (filename) (str): explicit file name
+            (path) (str): path where to save the file (current directory if empty)
+            (chunk_size) (int): chunk size in bytes (65536 by default)
+            (text) (str): text printed before the download status ("Downloaded: " by default)
+            (verify) (bool): verify the SSL certificate if True (True by default);
+                             can be overridden by the global env CM_UTILS_DOWNLOAD_VERIFY_SSL=no
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * filename (str): file name
+ * path (str): path to file
+ * size (int): file size
+
+ """
+
+ import requests
+ import time
+ import sys
+ from urllib import parse
+
+ # Get URL
+ url = i['url']
+
+ # Check file name
+ file_name = i.get('filename', '')
+ if file_name == '':
+ parsed_url = parse.urlparse(url)
+ file_name = os.path.basename(parsed_url.path)
+
+ # Check path
+ path = i.get('path', '')
+ if path is None or path == '':
+ path = os.getcwd()
+
+ # Output file
+ path_to_file = os.path.join(path, file_name)
+
+ if os.path.isfile(path_to_file):
+ os.remove(path_to_file)
+
+ print('Downloading to {}'.format(path_to_file))
+ print('')
+
+ # Download
+ size = -1
+ downloaded = 0
+ chunk_size = i.get('chunk_size', 65536)
+
+ text = i.get('text', 'Downloaded: ')
+
+ if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ:
+ verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes'
+ else:
+ verify = i.get('verify', True)
+
+ try:
+ with requests.get(url, stream=True, allow_redirects=True, verify=verify) as download:
+ download.raise_for_status()
+
+ size_string = download.headers.get('Content-Length')
+
+ if size_string is None:
+ transfer_encoding = download.headers.get(
+ 'Transfer-Encoding', '')
+ if transfer_encoding != 'chunked':
+ return {'return': 1, 'error': 'did not receive file'}
+ else:
+ size_string = "0"
+
+ size = int(size_string)
+
+ with open(path_to_file, 'wb') as output:
+ for chunk in download.iter_content(chunk_size=chunk_size):
+
+ if chunk:
+ output.write(chunk)
+ if size == 0:
+ continue
+ downloaded += 1
+ percent = downloaded * chunk_size * 100 / size
+
+ sys.stdout.write("\r{}{:3.0f}%".format(text, percent))
+ sys.stdout.flush()
+
+ sys.stdout.write("\r{}{:3.0f}%".format(text, 100))
+ sys.stdout.flush()
+
+ except Exception as e:
+ return {'return': 1, 'error': format(e)}
+
+ print('')
+ if size == 0:
+ file_stats = os.stat(path_to_file)
+ size = file_stats.st_size
+
+ return {'return': 0, 'filename': file_name,
+ 'path': path_to_file, 'size': size}
+
+ ##########################################################################
+ def unzip_file(self, i):
+ """
+ Unzip file
+
+ Args:
+ (CM input dict):
+
+ filename (str): explicit file name
+ (path) (str): path where to unzip file (current path otherwise)
+ (strip_folders) (int): strip this many leading folder levels from archived paths
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
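+ Example (illustrative):
+
+ input: {'filename': 'data.zip', 'strip_folders': 1}
+ -> unpacks data.zip into the current directory,
+ dropping the first folder level from the archived paths
+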
+ """
+
+ import zipfile
+
+ # Check file name
+ file_name = i['filename']
+
+ if not os.path.isfile(file_name):
+ return {'return': 1,
+ 'error': 'file {} not found'.format(file_name)}
+
+ console = i.get('out') == 'con'
+
+ # Attempt to read cmr.json
+ file_name_handle = open(file_name, 'rb')
+ file_name_zip = zipfile.ZipFile(file_name_handle)
+
+ info_files = file_name_zip.infolist()
+
+ path = i.get('path', '')
+ if path is None or path == '':
+ path = os.getcwd()
+
+ strip_folders = i.get('strip_folders', 0)
+
+ # Unpacking zip
+ for info in info_files:
+ f = info.filename
+ permissions = info.external_attr
+
+ if not f.startswith('..') and not f.startswith(
+ '/') and not f.startswith('\\'):
+ f_zip = f
+
+ if strip_folders > 0:
+ fsplit = f.split('/') # Zip standard on all OS
+ f = '/'.join(fsplit[strip_folders:])
+
+ file_path = os.path.join(path, f)
+
+ if f.endswith('/'):
+ # create directory
+ if not os.path.exists(file_path):
+ os.makedirs(file_path)
+ else:
+ dir_name = os.path.dirname(file_path)
+ if not os.path.exists(dir_name):
+ os.makedirs(dir_name)
+
+ # extract file
+ file_out = open(file_path, 'wb')
+ file_out.write(file_name_zip.read(f_zip))
+ file_out.close()
+
+ if permissions > 0xffff:
+ os.chmod(file_path, permissions >> 16)
+
+ file_name_zip.close()
+ file_name_handle.close()
+
+ return {'return': 0}
+
+ ##########################################################################
+ def compare_versions(self, i):
+ """
+ Compare versions
+
+ Args:
+
+ version1 (str): version 1
+ version2 (str): version 2
+
+ Returns:
+ (CM return dict):
+
+ * comparison (int): 1 - version 1 > version 2
+ 0 - version 1 == version 2
+ -1 - version 1 < version 2
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
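+
+ Example:
+
+ input: {'version1': '3.9.6', 'version2': '3.9'} -> comparison = 1
+ (missing components are padded with 0, so '3.9' behaves like '3.9.0')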
+ """
+
+ version1 = i['version1']
+ version2 = i['version2']
+
+ l_version1 = version1.split('.')
+ l_version2 = version2.split('.')
+
+ # 3.9.6 vs 3.9
+ # 3.9 vs 3.9.6
+
+ i_version1 = [int(v) if v.isdigit() else v for v in l_version1]
+ i_version2 = [int(v) if v.isdigit() else v for v in l_version2]
+
+ comparison = 0
+
+ for index in range(max(len(i_version1), len(i_version2))):
+ v1 = i_version1[index] if index < len(i_version1) else 0
+ v2 = i_version2[index] if index < len(i_version2) else 0
+
+ if v1 > v2:
+ comparison = 1
+ break
+ elif v1 < v2:
+ comparison = -1
+ break
+
+ return {'return': 0, 'comparison': comparison}
+
+ ##########################################################################
+ def json2yaml(self, i):
+ """
+ Convert JSON file to YAML
+
+ Args:
+
+ input (str): input file (.json)
+ (output) (str): output file (.yaml)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
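+
+ Example (illustrative):
+
+ input: {'input': 'meta.json'} -> writes meta.yaml next to it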
+ """
+
+ input_file = i.get('input', '')
+
+ if input_file == '':
+ return {'return': 1, 'error': 'please specify --input={json file}'}
+
+ output_file = i.get('output', '')
+
+ r = utils.load_json(input_file, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ meta = r['meta']
+
+ if output_file == '':
+ output_file = input_file[:-
+ 5] if input_file.endswith('.json') else input_file
+ output_file += '.yaml'
+
+ r = utils.save_yaml(output_file, meta)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def yaml2json(self, i):
+ """
+ Convert YAML file to JSON
+
+ Args:
+
+ input (str): input file (.yaml)
+ (output) (str): output file (.json)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ """
+
+ input_file = i.get('input', '')
+
+ if input_file == '':
+ return {'return': 1, 'error': 'please specify --input={yaml file}'}
+
+ output_file = i.get('output', '')
+
+ r = utils.load_yaml(input_file, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ meta = r['meta']
+
+ if output_file == '':
+ output_file = input_file[:-
+ 5] if input_file.endswith('.yaml') else input_file
+ output_file += '.json'
+
+ r = utils.save_json(output_file, meta)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def sort_json(self, i):
+ """
+ Sort JSON file
+
+ Args:
+
+ input (str): input file (.json)
+ (output) (str): output file
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ """
+
+ input_file = i.get('input', '')
+
+ if input_file == '':
+ return {'return': 1, 'error': 'please specify --input={json file}'}
+
+ r = utils.load_json(input_file, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ meta = r['meta']
+
+ output_file = i.get('output', '')
+
+ if output_file == '':
+ output_file = input_file
+
+ r = utils.save_json(output_file, meta, sort_keys=True)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def dos2unix(self, i):
+ """
+ Convert DOS file to UNIX (remove \r)
+
+ Args:
+
+ input (str): input file (.txt)
+ (output) (str): output file
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ """
+
+ input_file = i.get('input', '')
+
+ if input_file == '':
+ return {'return': 1, 'error': 'please specify --input={txt file}'}
+
+ r = utils.load_txt(input_file, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ s = r['string'].replace('\r', '')
+
+ output_file = i.get('output', '')
+
+ if output_file == '':
+ output_file = input_file
+
+ r = utils.save_txt(output_file, s)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def replace_string_in_file(self, i):
+ """
+ Replace string in file
+
+ Args:
+
+ input (str): input file (.txt)
+ (output) (str): output file
+ string (str): string to replace
+ replacement (str): replacement string
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ (update) (bool): True if file was updated
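+
+ Example (illustrative):
+
+ input: {'input': 'README.md', 'string': 'v1.0', 'replacement': 'v1.1'}
+ -> rewrites README.md with every occurrence of 'v1.0' replaced by 'v1.1'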
+ """
+
+ input_file = i.get('input', '')
+ if input_file == '':
+ return {'return': 1, 'error': 'please specify --input={txt file}'}
+
+ string = i.get('string', '')
+ if string == '':
+ return {'return': 1,
+ 'error': 'please specify --string={string to replace}'}
+
+ replacement = i.get('replacement', '')
+ if replacement == '':
+ return {'return': 1,
+ 'error': 'please specify --replacement={replacement string}'}
+
+ output_file = i.get('output', '')
+
+ if output_file == '':
+ output_file = input_file
+
+ r = utils.load_txt(input_file, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ s = r['string'].replace('\r', '')
+
+ s = s.replace(string, replacement)
+
+ r = utils.save_txt(output_file, s)
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def create_toc_from_md(self, i):
+ """
+ Create table of contents (TOC) from a Markdown file
+
+ Args:
+
+ input (str): input file (.md)
+ (output) (str): output file (input + '.toc' by default)
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
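+
+ Example (illustrative):
+
+ input: {'input': 'README.md'} -> writes README.md.toc with a Markdown
+ list of anchor links generated from the '#' headers in README.md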
+ """
+
+ input_file = i.get('input', '')
+ if input_file == '':
+ return {'return': 1, 'error': 'please specify --input={txt file}'}
+
+ output_file = i.get('output', '')
+
+ if output_file == '':
+ output_file = input_file + '.toc'
+
+ r = utils.load_txt(input_file, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ lines = r['string'].split('\n')
+
+ toc = []
+
+ toc.append('<details>')
+ toc.append('<summary>Click here to see the table of contents.</summary>')
+ toc.append('')
+
+ for line in lines:
+ line = line.strip()
+
+ if line.startswith('#'):
+ j = line.find(' ')
+ if j >= 0:
+ title = line[j:].strip()
+
+ x = title.lower().replace(' ', '-')
+
+ for k in range(0, 2):
+ if x.startswith('*'):
+ x = x[1:]
+ if x.endswith('*'):
+ x = x[:-1]
+
+ for z in [':', '+', '.', '(', ')', ',']:
+ x = x.replace(z, '')
+
+ y = ' ' * (2 * (j - 1)) + '* [' + title + '](#' + x + ')'
+
+ toc.append(y)
+
+ toc.append('')
+ toc.append('</details>')
+
+ r = utils.save_txt(output_file, '\n'.join(toc) + '\n')
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
+
+ ##########################################################################
+ def copy_to_clipboard(self, i):
+ """
+ Copy string to the clipboard
+
+ Args:
+
+ string (str): string to copy to the clipboard
+ (add_quotes) (bool): add quotes to the string in the clipboard
+ (skip_fail) (bool): if True, do not fail
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
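+
+ Example (illustrative):
+
+ input: {'string': 'cm run script "detect os"', 'skip_fail': True}
+ -> copies the string via pyperclip or tkinter and returns a warning
+ instead of an error if no clipboard backend is available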
+ """
+
+ s = i.get('string', '')
+
+ if i.get('add_quotes', False):
+ s = '"' + s + '"'
+
+ failed = False
+ warning = ''
+
+ # Try to load pyperclip (seems to work fine on Windows)
+ try:
+ import pyperclip
+ except Exception as e:
+ warning = format(e)
+ failed = True
+ pass
+
+ if not failed:
+ pyperclip.copy(s)
+ else:
+ failed = False
+
+ # Try to load Tkinter
+ try:
+ from Tkinter import Tk
+ except ImportError as e:
+ warning = format(e)
+ failed = True
+ pass
+
+ if failed:
+ failed = False
+ try:
+ from tkinter import Tk
+ except ImportError as e:
+ warning = format(e)
+ failed = True
+ pass
+
+ if not failed:
+ # Copy to clipboard
+ try:
+ r = Tk()
+ r.withdraw()
+ r.clipboard_clear()
+ r.clipboard_append(s)
+ r.update()
+ r.destroy()
+ except Exception as e:
+ failed = True
+ warning = format(e)
+
+ rr = {'return': 0}
+
+ if failed:
+ if not i.get('skip_fail', False):
+ return {'return': 1, 'error': warning}
+
+ rr['warning'] = warning
+
+ return rr
+
+ ##########################################################################
+ def list_files_recursively(self, i):
+ """
+ List files recursively and concatenate them into a comma-separated string
+
+ Args:
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ """
+
+ files = os.walk('.')
+
+ s = ''
+
+ for (dir_path, dir_names, file_names) in files:
+ for f in file_names:
+ if s != '':
+ s += ','
+
+ if dir_path == '.':
+ dir_path2 = ''
+ else:
+ dir_path2 = dir_path[2:].replace('\\', '/') + '/'
+
+ s += dir_path2 + f
+
+ print(s)
+
+ return {'return': 0}
+
+ ##########################################################################
+ def generate_secret(self, i):
+ """
+ Generate secret for web apps
+
+ Args:
+
+ Returns:
+ (CM return dict):
+
+ secret (str): secret
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+ """
+
+ import secrets
+ s = secrets.token_urlsafe(16)
+
+ print(s)
+
+ return {'return': 0, 'secret': s}
+
+ ##########################################################################
+ def detect_tags_in_artifact(self, i):
+ """
+ Detect if an artifact name actually contains tags (separated by spaces) and update the input accordingly
+
+ Args:
+
+ input (dict) : original input
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
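+
+ Example (illustrative):
+
+ input: {'input': {'artifact': 'detect os'}}
+ -> removes 'artifact' from the input and sets input['tags'] = 'detect,os'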
+ """
+
+ inp = i['input']
+
+ artifact = inp.get('artifact', '')
+ if artifact == '.':
+ del (inp['artifact'])
+ elif ' ' in artifact: # or ',' in artifact:
+ del (inp['artifact'])
+ if 'parsed_artifact' in inp:
+ del (inp['parsed_artifact'])
+ # Force substitute tags
+ inp['tags'] = artifact.replace(' ', ',')
+
+ return {'return': 0}
+
+ ##########################################################################
+ def prune_input(self, i):
+ """
+ Leave only input keys and remove the rest (to regenerate CM commands)
+
+ Args:
+
+ input (dict) : original input
+ (extra_keys_starts_with) (list): remove keys that start
+ with the ones from this list
+
+ Returns:
+ (CM return dict):
+
+ new_input (dict): pruned input
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
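+
+ Example (illustrative):
+
+ input: {'input': {'action': 'run', 'tags': 'detect,os', 'out': 'con'}}
+ -> {'return': 0, 'new_input': {'tags': 'detect,os'}}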
+ """
+
+ import copy
+
+ inp = i['input']
+ extra_keys = i.get('extra_keys_starts_with', [])
+
+ i_run_cmd_arc = copy.deepcopy(inp)
+ for k in inp:
+ remove = False
+ if k in ['action', 'automation', 'cmd', 'out',
+ 'parsed_automation', 'parsed_artifact', 'self_module']:
+ remove = True
+ if not remove:
+ for ek in extra_keys:
+ if k.startswith(ek):
+ remove = True
+ break
+
+ if remove:
+ del (i_run_cmd_arc[k])
+
+ return {'return': 0, 'new_input': i_run_cmd_arc}
+
+ ##########################################################################
+
+ def uid(self, i):
+ """
+ Generate CM UID.
+
+ Args:
+ (CM input dict): empty dict
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * uid (str): CM UID
+ """
+
+ console = i.get('out') == 'con'
+
+ r = utils.gen_uid()
+
+ if console:
+ print(r['uid'])
+
+ return r
+
+ ##########################################################################
+
+ def system(self, i):
+ """
+ Run system command and redirect output to string.
+
+ Args:
+ (CM input dict):
+
+ * cmd (str): command line
+ * (path) (str): go to this directory and return back to current
+ * (stdout) (str): stdout file
+ * (stderr) (str): stderr file
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * ret (int): return code
+ * std (str): stdout + stderr
+ * stdout (str): stdout
+ * stderr (str): stderr
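+
+ Example (illustrative):
+
+ input: {'cmd': 'echo Hi'}
+ -> {'return': 0, 'ret': 0, 'stdout': 'Hi', 'stderr': '', 'std': 'Hi'}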
+ """
+
+ cmd = i['cmd']
+
+ if cmd == '':
+ return {'return': 1, 'error': 'cmd is empty'}
+
+ path = i.get('path', '')
+ if path != '' and os.path.isdir(path):
+ cur_dir = os.getcwd()
+ os.chdir(path)
+
+ if i.get('stdout', '') != '':
+ fn1 = i['stdout']
+ fn1_delete = False
+ else:
+ r = utils.gen_tmp_file({})
+ if r['return'] > 0:
+ return r
+ fn1 = r['file_name']
+ fn1_delete = True
+
+ if i.get('stderr', '') != '':
+ fn2 = i['stderr']
+ fn2_delete = False
+ else:
+ r = utils.gen_tmp_file({})
+ if r['return'] > 0:
+ return r
+ fn2 = r['file_name']
+ fn2_delete = True
+
+ cmd += ' > ' + fn1 + ' 2> ' + fn2
+ rx = os.system(cmd)
+
+ std = ''
+ stdout = ''
+ stderr = ''
+
+ if os.path.isfile(fn1):
+ r = utils.load_txt(file_name=fn1, remove_after_read=fn1_delete)
+ if r['return'] == 0:
+ stdout = r['string'].strip()
+
+ if os.path.isfile(fn2):
+ r = utils.load_txt(file_name=fn2, remove_after_read=fn2_delete)
+ if r['return'] == 0:
+ stderr = r['string'].strip()
+
+ std = stdout
+ if stderr != '':
+ if std != '':
+ std += '\n'
+ std += stderr
+
+ if path != '' and os.path.isdir(path):
+ os.chdir(cur_dir)
+
+ return {'return': 0, 'ret': rx, 'stdout': stdout,
+ 'stderr': stderr, 'std': std}
+
+ ############################################################
+ def load_cfg(self, i):
+ """
+ Load configuration artifacts and files
+
+ Args:
+ (CM input dict):
+
+ (tags) (str): prune cfg artifacts by tags
+ (artifact) (str): cfg artifact name or UID
+ (key) (str): load only config files whose names start with this prefix
+ (prune) (dict): filter loaded configurations by UID and keys
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
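+ Example (illustrative):
+
+ input: {'tags': 'benchmark,compute'}
+ -> {'return': 0, 'lst': [found cfg artifacts], 'selection': [loaded config dicts]}
+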
+ """
+
+ return utils.call_internal_module(
+ self, __file__, 'module_cfg', 'load_cfg', i)
+
+ ############################################################
+ def select_cfg(self, i):
+ """
+ Select cfg interactively
+
+ Args:
+ (CM input dict):
+ tags (str): list of tags to find cfg artifacts
+ (alias) (str): alias of a cfg file
+ (uid) (str): UID of a cfg file
+ (title) (str): title to display when selecting configurations
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ i['self_module'] = self
+
+ return utils.call_internal_module(
+ self, __file__, 'module_cfg', 'select_cfg', i)
+
+ ############################################################
+ def print_yaml(self, i):
+ """
+ Print YAML file (as JSON)
+
+ Args:
+ (CM input dict):
+ file (str): input file
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ filename = i.get('file', '')
+ if filename == '':
+ return {'return': 1, 'error': 'please specify --file={YAML file}'}
+
+ r = utils.load_yaml(filename, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ meta = r['meta']
+
+ import json
+ print(json.dumps(meta, indent=2))
+
+ return {'return': 0}
+
+ ############################################################
+ def print_json(self, i):
+ """
+ Print JSON file
+
+ Args:
+ (CM input dict):
+ file (str): input file
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ """
+
+ filename = i.get('file', '')
+ if filename == '':
+ return {'return': 1, 'error': 'please specify --file={JSON file}'}
+
+ r = utils.load_json(filename, check_if_exists=True)
+ if r['return'] > 0:
+ return r
+
+ meta = r['meta']
+
+ import json
+ print(json.dumps(meta, indent=2))
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py b/cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py
new file mode 100644
index 0000000000..e3a445c128
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py
@@ -0,0 +1,352 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+import cmind
+import copy
+
+base_path = {}
+base_path_meta = {}
+
+##########################################################################
+
+
+def load_cfg(i):
+
+ tags = i.get('tags', '')
+ artifact = i.get('artifact', '')
+
+ key = i.get('key', '')
+ key_end = i.get('key_end', [])
+
+ ii = {'action': 'find',
+ 'automation': 'cfg'}
+ if artifact != '':
+ ii['artifact'] = artifact
+ elif tags != '':
+ ii['tags'] = tags
+
+ r = cmind.access(ii)
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ prune = i.get('prune', {})
+ prune_key = prune.get('key', '')
+ prune_key_uid = prune.get('key_uid', '')
+ prune_meta_key = prune.get('meta_key', '')
+ prune_meta_key_uid = prune.get('meta_key_uid', '')
+ prune_uid = prune.get('uid', '')
+ prune_list = prune.get('list', [])
+
+ # Checking individual files inside CM entry
+ selection = []
+
+ if i.get('skip_files', False):
+ for l in lst:
+ meta = l.meta
+ full_path = l.path
+
+ meta['full_path'] = full_path
+
+ add = True
+
+ if prune_key != '' and prune_key_uid != '':
+ if prune_key_uid not in meta.get(prune_key, []):
+ add = False
+
+ if add:
+ selection.append(meta)
+ else:
+ for l in lst:
+ path = l.path
+
+ main_meta = l.meta
+
+ skip = False
+
+ if prune_meta_key != '' and prune_meta_key_uid != '':
+ if prune_meta_key_uid not in main_meta.get(prune_meta_key, []):
+ skip = True
+
+ if skip:
+ continue
+
+ all_tags = main_meta.get('tags', [])
+
+ files = os.listdir(path)
+
+ for f in files:
+ if key != '' and not f.startswith(key):
+ continue
+
+ if f.startswith('_') or (not f.endswith(
+ '.json') and not f.endswith('.yaml')):
+ continue
+
+ if len(key_end) > 0:
+ skip = True
+ for ke in key_end:
+ if f.endswith(ke):
+ skip = False
+ break
+ if skip:
+ continue
+
+ full_path = os.path.join(path, f)
+
+ full_path_without_ext = full_path[:-5]
+
+ r = cmind.utils.load_yaml_and_json(full_path_without_ext)
+ if r['return'] > 0:
+ print('Warning: problem loading file {}'.format(full_path))
+ else:
+ meta = r['meta']
+
+ # Check base
+ r = process_base(meta, full_path)
+ if r['return'] > 0:
+ return r
+ meta = r['meta']
+
+ uid = meta['uid']
+
+ # Check pruning
+ add = True
+
+ if len(prune) > 0:
+ if prune_uid != '' and uid != prune_uid:
+ add = False
+
+ if add and len(
+ prune_list) > 0 and uid not in prune_list:
+ add = False
+
+ if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get(
+ prune_key, None):
+ add = False
+
+ if add:
+ meta['full_path'] = full_path
+
+ add_all_tags = copy.deepcopy(all_tags)
+
+ name = meta.get('name', '')
+ if name == '':
+ name = ' '.join(meta.get('tags', []))
+ name = name.strip()
+ meta['name'] = name
+
+ file_tags = meta.get('tags', '').strip()
+ if file_tags == '':
+ if name != '':
+ add_all_tags += [v.lower()
+ for v in name.split(' ')]
+ else:
+ add_all_tags += file_tags.split(',')
+
+ meta['all_tags'] = add_all_tags
+
+ meta['main_meta'] = main_meta
+
+ selection.append(meta)
+
+ return {'return': 0, 'lst': lst, 'selection': selection}
+
+##########################################################################
+
+
+def process_base(meta, full_path):
+
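+ # A meta can inherit from a base config via '_base', given either as a
+ # plain file name ('base.yaml') resolved next to the current file, or as
+ # 'artifact:file.yaml' resolved inside another cfg artifact. String keys
+ # 'tags' and 'name' are concatenated, lists are appended, dicts are
+ # merged; all other keys from the meta overwrite the base.
+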
+ global base_path, base_path_meta
+
+ _base = meta.get('_base', '')
+ if _base != '':
+ name = ''
+
+ filename = _base
+ full_path_base = os.path.dirname(full_path)
+
+ if not filename.endswith('.yaml') and not filename.endswith('.json'):
+ return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format(
+ filename, full_path)}
+
+ if ':' in _base:
+ x = _base.split(':')
+ name = x[0]
+
+ full_path_base = base_path.get(name, '')
+ if full_path_base == '':
+
+ # Find artifact
+ r = cmind.access({'action': 'find',
+ 'automation': 'cfg',
+ 'artifact': name})
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ if len(lst) == 0:
+ return {'return': 1, 'error': '_base artifact {} not found in {}'.format(
+ name, full_path)}
+
+ full_path_base = lst[0].path
+
+ base_path[name] = full_path_base
+
+ filename = x[1]
+
+ # Load base
+ path = os.path.join(full_path_base, filename)
+
+ if not os.path.isfile(path):
+ return {'return': 1, 'error': '_base file {} not found in {}'.format(
+ filename, full_path)}
+
+ if path in base_path_meta:
+ base = copy.deepcopy(base_path_meta[path])
+ else:
+ path_without_ext = path[:-5]
+
+ r = cmind.utils.load_yaml_and_json(path_without_ext)
+ if r['return'] > 0:
+ return r
+
+ base = r['meta']
+
+ base_path_meta[path] = copy.deepcopy(base)
+
+ for k in meta:
+ v = meta[k]
+
+ if k not in base:
+ base[k] = v
+ else:
+ if isinstance(v, str):
+ # Only merge a few special keys and overwrite the rest
+ if k in ['tags', 'name']:
+ base[k] += meta[k]
+ else:
+ base[k] = meta[k]
+
+ elif isinstance(v, list):
+ for vv in v:
+ base[k].append(vv)
+ elif isinstance(v, dict):
+ # dicts from the meta are merged into the base dict
+ base[k].update(v)
+
+ meta = base
+
+ return {'return': 0, 'meta': meta}
+
+##########################################################################
+
+def select_cfg(i):
+
+ self_module = i['self_module']
+ tags = i['tags']
+ alias = i.get('alias', '')
+ uid = i.get('uid', '')
+ title = i.get('title', '')
+
+ # Find available configuration artifacts
+ r = self_module.cmind.access(
+ {'action': 'find', 'automation': 'cfg', 'tags': 'basic,docker,configurations'})
+ if r['return'] > 0:
+ return r
+
+ lst = r['list']
+
+ selector = []
+
+ # Do coarse-grain search for CM artifacts
+ for l in lst:
+ p = l.path
+
+ if alias != '':
+ for ext in ['.json', '.yaml']:
+ p1 = os.path.join(p, alias + ext)
+ if os.path.isfile(p1):
+ selector.append({'path': p1, 'alias': alias})
+ break
+
+ else:
+ files = os.listdir(p)
+
+ for f in files:
+ if not f.startswith('_cm') and (
+ f.endswith('.json') or f.endswith('.yaml')):
+ selector.append({'path': os.path.join(p, f), 'alias': f[:-5]})
+
+ # Load meta for name and UID
+ selector_with_meta = []
+ for s in range(0, len(selector)):
+ ss = selector[s]
+
+ path = ss['path']
+
+ full_path_without_ext = path[:-5]
+
+ r = cmind.utils.load_yaml_and_json(full_path_without_ext)
+ if r['return'] > 0:
+ print('Warning: problem loading configuration file {}'.format(path))
+ continue
+
+ meta = r['meta']
+
+ if uid == '' or meta.get('uid', '') == uid:
+ ss['meta'] = meta
+ selector_with_meta.append(ss)
+
+ # Quit if no configurations found
+ if len(selector_with_meta) == 0:
+ return {'return': 16, 'error': 'configuration was not found'}
+
+ select = 0
+ if len(selector_with_meta) > 1:
+ xtitle = ' ' + title if title != '' else ''
+ print('')
+ print('Available{} configurations:'.format(xtitle))
+
+ print('')
+
+ selector_with_meta = sorted(selector_with_meta, key=lambda x: x['meta'].get('name', ''))
+ s = 0
+ for ss in selector_with_meta:
+ alias = ss['alias']
+ uid = ss['meta'].get('uid', '')
+ name = ss['meta'].get('name', '')
+
+ x = name
+ if x != '':
+ x += ' '
+ x += '(' + uid + ')'
+
+ print(f'{s}) {x}')
+
+ s += 1
+
+ print('')
+ select = input('Enter configuration number or press Enter for 0: ')
+
+ if select.strip() == '':
+ select = '0'
+
+ select = int(select)
+
+ if select < 0 or select >= len(selector_with_meta):
+ return {'return': 1, 'error': 'selection is out of range'}
+
+ ss = selector_with_meta[select]
+
+ return {'return': 0, 'selection': ss}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json
new file mode 100644
index 0000000000..6877b34a7e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json
@@ -0,0 +1,10 @@
+{
+ "alias": "benchmark-hardware-compute",
+ "automation_alias": "cfg",
+ "automation_uid": "88dce9c160324c5d",
+ "tags": [
+ "benchmark",
+ "compute"
+ ],
+ "uid": "ca67f372e7294afd"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json
new file mode 100644
index 0000000000..53f295d729
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json
@@ -0,0 +1,6 @@
+{
+ "uid": "cdfd424c32734e38",
+ "name": "AMD - x64",
+ "tags": "cpu,x64,generic,amd",
+ "mlperf_inference_device": "cpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json
new file mode 100644
index 0000000000..d70e1d1554
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json
@@ -0,0 +1,6 @@
+{
+ "uid": "d8f06040f7294319",
+ "name": "AMD - GPU",
+ "tags": "gpu,amd",
+ "mlperf_inference_device": "rocm"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
new file mode 100644
index 0000000000..7af318b27b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
@@ -0,0 +1,6 @@
+{
+ "uid":"357a972e79614903",
+ "name": "Arm - AArch64",
+ "tags": "cpu,arm64,aarch64,generic",
+ "mlperf_inference_device": "cpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json
new file mode 100644
index 0000000000..2bb4d22cf5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json
@@ -0,0 +1,6 @@
+{
+ "uid": "b3be7ac9ef954f5a",
+ "name": "Google - TPU",
+ "tags": "tpu,google",
+ "mlperf_inference_device": "tpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json
new file mode 100644
index 0000000000..b6caa96554
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json
@@ -0,0 +1,6 @@
+{
+ "uid": "a42388a2a8cd412c",
+ "name": "Intel/Habana - Gauidi 2",
+ "tags": "gaudi,habana",
+ "mlperf_inference_device": "gaudi"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json
new file mode 100644
index 0000000000..2e8ab51c4a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json
@@ -0,0 +1,6 @@
+{
+ "uid": "ee8c568e0ac44f2b",
+ "name": "Intel - x64",
+ "tags": "cpu,x64,generic,intel",
+ "mlperf_inference_device": "cpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml
new file mode 100644
index 0000000000..d8b9787c65
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml
@@ -0,0 +1,7 @@
+uid: fe379ecd1e054a00
+
+tags: gpu,nvidia,jetson,orin
+
+name: "Nvidia - GPU - Jetson Orin"
+
+mlperf_inference_device: cuda
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json
new file mode 100644
index 0000000000..5bc7582532
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json
@@ -0,0 +1,6 @@
+{
+ "uid": "fe379ecd1e054a00",
+ "name": "Nvidia - GPU",
+ "tags": "gpu,nvidia",
+ "mlperf_inference_device": "cuda"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json
new file mode 100644
index 0000000000..aa84e57351
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json
@@ -0,0 +1,6 @@
+{
+ "uid": "d2ae645066664463",
+ "name": "Qualcomm - AI 100",
+ "tags": "accelerator,acc,qualcomm,ai,100,ai-100",
+ "mlperf_inference_device": "qaic"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml
new file mode 100644
index 0000000000..c6d06e9b43
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml
@@ -0,0 +1,5 @@
+uid: 2cd26d4f92ca4b85
+
+tags: stm,stm32,stm32l4r5zit6u,nucleo,l4r5zi
+
+name: "STM32L4R5ZIT6U - NUCLEO-L4R5ZI"
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json
new file mode 100644
index 0000000000..533c86271a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json
@@ -0,0 +1,10 @@
+{
+ "alias": "benchmark-list",
+ "automation_alias": "cfg",
+ "automation_uid": "88dce9c160324c5d",
+ "tags": [
+ "benchmark",
+ "list"
+ ],
+ "uid": "15291dfc4f904146"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml
new file mode 100644
index 0000000000..590eb4475d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml
@@ -0,0 +1,19 @@
+uid: f594dc94b2714713
+
+tags: benchmark,run,loadgen,cpp
+
+name: "ML models with LoadGen (C++; Linux/MacOS/Windows) - dev"
+
+urls:
+- name: "GitHub dev page"
+ url: "https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-cpp"
+
+supported_compute:
+- cpu,x64
+- gpu,nvidia
+
+script_name: run-mlperf-inference-app,4a5d5b13fd7e4ac8
+
+bench_input:
+ mlperf_inference_implementation: mil
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml
new file mode 100644
index 0000000000..74e5aa48e9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml
@@ -0,0 +1,16 @@
+uid: 0d6b54eb27d1454e
+
+tags: benchmark,run,loadgen,python
+
+name: "ML models with LoadGen (Python; Linux/MacOS/Windows) - dev"
+
+urls:
+- name: "GitHub dev page"
+ url: "https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python"
+
+supported_compute:
+- cpu,x64
+- cpu,arm64
+- gpu,nvidia
+
+script_name: app-loadgen-generic-python,d3d949cc361747a6
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml
new file mode 100644
index 0000000000..a01edcbde1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml
@@ -0,0 +1,18 @@
+uid: 94f0faaa0c61445d
+
+tags: benchmark,run,mlperf,abtf,mlperf-abtf
+
+name: "MLPerf ABTF - dev"
+
+urls:
+- name: "Announcement"
+ url: "https://mlcommons.org/2023/05/avcc-and-mlcommons-join-forces-to-develop-an-automotive-industry-standard/"
+- name: "MLCommons CM automation (under development)"
+ url: "https://access.cknowledge.org/playground/?action=scripts"
+
+supported_compute:
+- cpu,x64
+- cpu,arm64
+- gpu,nvidia
+
+script_name: test-abtf-ssd-pytorch,91bfc4333b054c21
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml
new file mode 100644
index 0000000000..e57764a486
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml
@@ -0,0 +1,28 @@
+uid: 39877bb63fb54725
+
+tags: benchmark,run,mlperf,inference,mlperf-inference
+
+name: "MLPerf inference"
+
+urls:
+- name: "Official page"
+ url: "https://mlcommons.org/benchmarks/inference"
+- name: "GitHub dev page"
+ url: "https://github.com/mlcommons/inference"
+- name: "ArXiv paper"
+ url: "https://arxiv.org/abs/1911.02549"
+- name: "MLCommons CM automation for MLPerf inference"
+ url: "https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference"
+
+script_name: run-mlperf-inference-app,4a5d5b13fd7e4ac8
+
+skip_extra_urls: true
+
+supported_compute:
+- cpu,x64
+- cpu,arm64
+- gpu,nvidia
+- gpu,amd
+- accelerator,acc,qualcomm,ai,100,ai-100
+- tpu,google
+- gaudi,habana
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml
new file mode 100644
index 0000000000..85771a44d9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml
@@ -0,0 +1,14 @@
+uid: 8b2ed0897bd74267
+
+tags: benchmark,run,mlperf,mobile,mlperf-mobile
+
+name: "MLPerf mobile"
+
+urls:
+- name: "Official page"
+ url: "https://mlcommons.org/benchmarks/inference-mobile/"
+- name: "GitHub page for mobile app"
+ url: "https://github.com/mlcommons/mobile_app_open"
+
+supported_compute:
+- cpu,arm64
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml
new file mode 100644
index 0000000000..d6aeccabc5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml
@@ -0,0 +1,16 @@
+uid: 28870394c19c4c37
+
+tags: benchmark,run,mlperf,tiny,mlperf-tiny
+
+name: "MLPerf tiny"
+
+urls:
+- name: "Official page"
+ url: "https://mlcommons.org/benchmarks/inference-tiny"
+- name: "GitHub dev page"
+ url: "https://github.com/mlcommons/tiny"
+- name: "MLCommons CM automation (under development)"
+ url: "https://github.com/mlcommons/ck/blob/master/docs/tutorials/reproduce-mlperf-tiny.md"
+
+supported_compute:
+- stm32
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml
new file mode 100644
index 0000000000..8b95de4f73
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml
@@ -0,0 +1,18 @@
+uid: 59311e6098c14b22
+
+tags: benchmark,run,mlperf,training,mlperf-training
+
+name: "MLPerf training"
+
+urls:
+- name: "Official page"
+ url: "https://mlcommons.org/benchmarks/training"
+- name: "GitHub dev page"
+ url: "https://github.com/mlcommons/training"
+- name: "MLCommons CM automation (under development)"
+ url: "https://github.com/mlcommons/ck/blob/master/docs/tutorials/reproduce-mlperf-training.md"
+
+supported_compute:
+- cpu,x64
+- gpu,nvidia
+- tpu,google
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
new file mode 100644
index 0000000000..334bd4d94c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
@@ -0,0 +1,45 @@
+alias: benchmark-run-mlperf-inference-v3.1
+uid: 8eb42e27ec984185
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- benchmark
+- run
+- mlperf
+- inference
+- v3.1
+
+name: "MLPerf inference - v3.1"
+
+supported_compute:
+- ee8c568e0ac44f2b
+- fe379ecd1e054a00
+
+bench_uid: 39877bb63fb54725
+
+view_dimensions:
+- - input.device
+ - "MLPerf device"
+- - input.implementation
+ - "MLPerf implementation"
+- - input.backend
+ - "MLPerf backend"
+- - input.model
+ - "MLPerf model"
+- - input.precision
+ - "Model precision"
+- - input.scenario
+ - "MLPerf scenario"
+- - input.host_os
+ - "Host OS"
+- - output.state.cm-mlperf-inference-results-last.performance
+ - "Got performance"
+ - "tick"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+ - "Got accuracy"
+ - "tick"
+- - output.state.cm-mlperf-inference-results-last.power
+ - "Got energy"
+ - "tick"
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
new file mode 100644
index 0000000000..d1f187f498
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
@@ -0,0 +1,54 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "datacenter",
+ "device": "qaic",
+ "model": "bert-99.9",
+ "precision": "float16",
+ "implementation": "qualcomm",
+ "backend": "glow",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-0eeb9799b12b488f",
+ "quiet": true,
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=datacenter",
+ "--device=qaic",
+ "--model=bert-99.9",
+ "--precision=float16",
+ "--implementation=qualcomm",
+ "--backend=glow",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--quiet",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
new file mode 100644
index 0000000000..a9243fe3ce
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "800fe1b33ca443da",
+ "compute_uid": "d2ae645066664463",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T15:25:03.786139",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
new file mode 100644
index 0000000000..a07a992e76
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
@@ -0,0 +1,11 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "cm-mlperf-inference-results-last": {
+ "performance": "tested-will-be-added-in-v4.0",
+ "performance_valid": true
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json
new file mode 100644
index 0000000000..1fe11d6d51
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json
@@ -0,0 +1,55 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "bert-99",
+ "precision": "int8",
+ "implementation": "reference",
+ "backend": "deepsparse",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-0eeb9799b12b488f",
+ "quiet": true,
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=bert-99",
+ "--precision=int8",
+ "--implementation=reference",
+ "--backend=deepsparse",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--jf=run-0eeb9799b12b488f",
+ "--quiet",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json
new file mode 100644
index 0000000000..dbd58de078
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "12242042335e4bc8",
+ "compute_uid": "ee8c568e0ac44f2b",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T15:15:53.984671",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json
new file mode 100644
index 0000000000..519ddf3a3b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json
@@ -0,0 +1,137 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "12.4548",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "64039368",
+ "max latency (ns)": "802905050",
+ "mean latency (ns)": "372956875",
+ "50.00 percentile latency (ns)": "378435867",
+ "90.00 percentile latency (ns)": "802905050",
+ "95.00 percentile latency (ns)": "802905050",
+ "97.00 percentile latency (ns)": "802905050",
+ "99.00 percentile latency (ns)": "802905050",
+ "99.90 percentile latency (ns)": "802905050",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-cpu-deepsparse-vdefault-default_config": {
+ "bert-99": {
+ "Offline": {
+ "performance": "12.455",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "12.455",
+ "performance_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "12.4548",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "64039368",
+ "max latency (ns)": "802905050",
+ "mean latency (ns)": "372956875",
+ "50.00 percentile latency (ns)": "378435867",
+ "90.00 percentile latency (ns)": "802905050",
+ "95.00 percentile latency (ns)": "802905050",
+ "97.00 percentile latency (ns)": "802905050",
+ "99.00 percentile latency (ns)": "802905050",
+ "99.90 percentile latency (ns)": "802905050",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-cpu-deepsparse-vdefault-default_config": {
+ "bert-99": {
+ "Offline": {
+ "performance": "12.455",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "12.455",
+ "performance_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json
new file mode 100644
index 0000000000..b02bb76950
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json
@@ -0,0 +1,55 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "bert-99.9",
+ "precision": "float32",
+ "implementation": "reference",
+ "backend": "onnxruntime",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-52c1d43172664ed0",
+ "quiet": true,
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=bert-99.9",
+ "--precision=float32",
+ "--implementation=reference",
+ "--backend=onnxruntime",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--jf=run-52c1d43172664ed0",
+ "--quiet",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json
new file mode 100644
index 0000000000..7b7b419f34
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "52c1d43172664ed0",
+ "compute_uid": "ee8c568e0ac44f2b",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T15:04:13.424211",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json
new file mode 100644
index 0000000000..c250f0c626
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json
@@ -0,0 +1,137 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.615377",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "4705323615",
+ "max latency (ns)": "16250190121",
+ "mean latency (ns)": "10456508889",
+ "50.00 percentile latency (ns)": "10133038152",
+ "90.00 percentile latency (ns)": "16250190121",
+ "95.00 percentile latency (ns)": "16250190121",
+ "97.00 percentile latency (ns)": "16250190121",
+ "99.00 percentile latency (ns)": "16250190121",
+ "99.90 percentile latency (ns)": "16250190121",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://armi.in/files/model.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-cpu-onnxruntime-v1.17.0-default_config": {
+ "bert-99.9": {
+ "Offline": {
+ "performance": "0.615",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.615",
+ "performance_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.615377",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "4705323615",
+ "max latency (ns)": "16250190121",
+ "mean latency (ns)": "10456508889",
+ "50.00 percentile latency (ns)": "10133038152",
+ "90.00 percentile latency (ns)": "16250190121",
+ "95.00 percentile latency (ns)": "16250190121",
+ "97.00 percentile latency (ns)": "16250190121",
+ "99.00 percentile latency (ns)": "16250190121",
+ "99.90 percentile latency (ns)": "16250190121",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://armi.in/files/model.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-cpu-onnxruntime-v1.17.0-default_config": {
+ "bert-99.9": {
+ "Offline": {
+ "performance": "0.615",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.615",
+ "performance_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json
new file mode 100644
index 0000000000..2addebee94
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json
@@ -0,0 +1,56 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_submission,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cuda",
+ "model": "bert-99",
+ "host_os": "linux",
+ "precision": "float32",
+ "implementation": "nvidia-original",
+ "backend": "tensorrt",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "submitter": "CTuning",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "compliance": "no",
+ "j": true,
+ "time": true,
+ "clean": true,
+ "quiet": true,
+ "jf": "mlperf-inference-results",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_submission,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cuda",
+ "--model=bert-99",
+ "--precision=float32",
+ "--implementation=nvidia-original",
+ "--backend=tensorrt",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--submitter=CTuning",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--compliance=no",
+ "--j",
+ "--time",
+ "--clean",
+ "--quiet",
+ "--jf=mlperf-inference-results"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json
new file mode 100644
index 0000000000..0e5dcba611
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "66cce585ff0242bc",
+ "compute_uid": "fe379ecd1e054a00",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T16:23:59.000629",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json
new file mode 100644
index 0000000000..82a0cc826b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json
@@ -0,0 +1,56 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_submission,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "bert-99",
+ "host_os": "linux",
+ "precision": "float32",
+ "implementation": "reference",
+ "backend": "tf",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "submitter": "CTuning",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "compliance": "no",
+ "j": true,
+ "time": true,
+ "clean": true,
+ "quiet": true,
+ "jf": "mlperf-inference-results",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_submission,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=bert-99",
+ "--precision=float32",
+ "--implementation=reference",
+ "--backend=tf",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--submitter=CTuning",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--compliance=no",
+ "--j",
+ "--time",
+ "--clean",
+ "--quiet",
+ "--jf=mlperf-inference-results"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json
new file mode 100644
index 0000000000..3bde194aba
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "6a07cf881dee462a",
+ "compute_uid": "ee8c568e0ac44f2b",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T15:33:11.932584",
+ "functional": false,
+ "reproduced": false,
+ "support_docker": false
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json
new file mode 100644
index 0000000000..de6e2b2c93
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json
@@ -0,0 +1,55 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cuda",
+ "model": "bert-99",
+ "precision": "float32",
+ "implementation": "reference",
+ "backend": "onnxruntime",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-7d80f464b2274742",
+ "quiet": true,
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cuda",
+ "--model=bert-99",
+ "--precision=float32",
+ "--implementation=reference",
+ "--backend=onnxruntime",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--jf=run-7d80f464b2274742",
+ "--quiet",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json
new file mode 100644
index 0000000000..eadf7f2014
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json
@@ -0,0 +1,10 @@
+{
+ "uid": "7d80f464b2274742",
+ "compute_uid": "fe379ecd1e054a00",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T16:04:27.903539",
+ "notes":"ONNX 1.15.0 worked; ONNX 1.17.0 did not work",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": false
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json
new file mode 100644
index 0000000000..5d8f74da15
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json
@@ -0,0 +1,137 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "13.1969",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "295840204",
+ "max latency (ns)": "757755274",
+ "mean latency (ns)": "521501098",
+ "50.00 percentile latency (ns)": "497153427",
+ "90.00 percentile latency (ns)": "757755274",
+ "95.00 percentile latency (ns)": "757755274",
+ "97.00 percentile latency (ns)": "757755274",
+ "99.00 percentile latency (ns)": "757755274",
+ "99.90 percentile latency (ns)": "757755274",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://armi.in/files/model.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-gpu-onnxruntime-v1.15.0-default_config": {
+ "bert-99": {
+ "Offline": {
+ "performance": "13.197",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "13.197",
+ "performance_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "13.1969",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "295840204",
+ "max latency (ns)": "757755274",
+ "mean latency (ns)": "521501098",
+ "50.00 percentile latency (ns)": "497153427",
+ "90.00 percentile latency (ns)": "757755274",
+ "95.00 percentile latency (ns)": "757755274",
+ "97.00 percentile latency (ns)": "757755274",
+ "99.00 percentile latency (ns)": "757755274",
+ "99.90 percentile latency (ns)": "757755274",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://armi.in/files/model.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-gpu-onnxruntime-v1.15.0-default_config": {
+ "bert-99": {
+ "Offline": {
+ "performance": "13.197",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "13.197",
+ "performance_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json
new file mode 100644
index 0000000000..c72a9f6a27
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json
@@ -0,0 +1,56 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_submission,_short",
+ "division": "open",
+ "category": "edge",
+ "host_os": "linux",
+ "device": "cpu",
+ "model": "retinanet",
+ "precision": "float32",
+ "implementation": "reference",
+ "backend": "onnxruntime",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "submitter": "CTuning",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "compliance": "no",
+ "j": true,
+ "time": true,
+ "clean": true,
+ "quiet": true,
+ "jf": "mlperf-inference-results",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_submission,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=retinanet",
+ "--precision=float32",
+ "--implementation=reference",
+ "--backend=onnxruntime",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--submitter=CTuning",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--compliance=no",
+ "--j",
+ "--time",
+ "--clean",
+ "--quiet",
+ "--jf=mlperf-inference-results"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json
new file mode 100644
index 0000000000..2b86368970
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "7f094c244ebb4985",
+ "compute_uid": "ee8c568e0ac44f2b",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-18",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json
new file mode 100644
index 0000000000..cae36b057d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json
@@ -0,0 +1,146 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.808629",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "963485100",
+ "max latency (ns)": "12366604800",
+ "mean latency (ns)": "5961694610",
+ "50.00 percentile latency (ns)": "6164791100",
+ "90.00 percentile latency (ns)": "12366604800",
+ "95.00 percentile latency (ns)": "12366604800",
+ "97.00 percentile latency (ns)": "12366604800",
+ "99.00 percentile latency (ns)": "12366604800",
+ "99.90 percentile latency (ns)": "12366604800",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "64"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "resnext50_32x4d_fpn.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "no"
+ },
+ "cm-mlperf-inference-results": {
+ "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": {
+ "retinanet": {
+ "Offline": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.808629",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "963485100",
+ "max latency (ns)": "12366604800",
+ "mean latency (ns)": "5961694610",
+ "50.00 percentile latency (ns)": "6164791100",
+ "90.00 percentile latency (ns)": "12366604800",
+ "95.00 percentile latency (ns)": "12366604800",
+ "97.00 percentile latency (ns)": "12366604800",
+ "99.00 percentile latency (ns)": "12366604800",
+ "99.90 percentile latency (ns)": "12366604800",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "64"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "resnext50_32x4d_fpn.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "no"
+ },
+ "cm-mlperf-inference-results": {
+ "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": {
+ "retinanet": {
+ "Offline": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils",
+ "generate,mlperf,inference,submission"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md
new file mode 100644
index 0000000000..6b58ae634a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md
@@ -0,0 +1 @@
+TBD1
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json
new file mode 100644
index 0000000000..fb7e74af53
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json
@@ -0,0 +1,53 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "bert-99",
+ "precision": "uint8",
+ "implementation": "intel-original",
+ "backend": "pytorch",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-d8c0f02f52bf49ae",
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=bert-99",
+ "--precision=uint8",
+ "--implementation=intel-original",
+ "--backend=pytorch",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--jf=run-d8c0f02f52bf49ae",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json
new file mode 100644
index 0000000000..adf9c9f9f1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "d5b6b5af6d794045",
+ "compute_uid": "ee8c568e0ac44f2b",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T16:18:34.632335",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json
new file mode 100644
index 0000000000..d23c116653
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json
@@ -0,0 +1,53 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "retinanet",
+ "precision": "float32",
+ "implementation": "mil",
+ "backend": "onnxruntime",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-d8c0f02f52bf49ae",
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=retinanet",
+ "--precision=float32",
+ "--implementation=mil",
+ "--backend=onnxruntime",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--jf=run-d8c0f02f52bf49ae",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json
new file mode 100644
index 0000000000..b0269fa051
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json
@@ -0,0 +1,10 @@
+{
+ "uid": "d8c0f02f52bf49ae",
+ "compute_uid": "ee8c568e0ac44f2b",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-20T15:39:15.255021",
+ "notes":"Used clang 14 installed via apt; LLVM 16.0.4 couldn't find llvmgold plugin - need to check ...",
+ "functional": false,
+ "reproduced": false,
+ "support_docker": false
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json
new file mode 100644
index 0000000000..784796ecc2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json
@@ -0,0 +1,137 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "QueueSUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.452945",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "2550773320",
+ "max latency (ns)": "22077722147",
+ "mean latency (ns)": "12323786694",
+ "50.00 percentile latency (ns)": "13414914364",
+ "90.00 percentile latency (ns)": "22077722147",
+ "95.00 percentile latency (ns)": "22077722147",
+ "97.00 percentile latency (ns)": "22077722147",
+ "99.00 percentile latency (ns)": "22077722147",
+ "99.90 percentile latency (ns)": "22077722147",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "64"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "resnext50_32x4d_fpn.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "no"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-cpp-cpu-onnxruntime-vdefault-default_config": {
+ "retinanet": {
+ "Offline": {
+ "performance": "0.453",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.453",
+ "performance_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "QueueSUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.452945",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "2550773320",
+ "max latency (ns)": "22077722147",
+ "mean latency (ns)": "12323786694",
+ "50.00 percentile latency (ns)": "13414914364",
+ "90.00 percentile latency (ns)": "22077722147",
+ "95.00 percentile latency (ns)": "22077722147",
+ "97.00 percentile latency (ns)": "22077722147",
+ "99.00 percentile latency (ns)": "22077722147",
+ "99.90 percentile latency (ns)": "22077722147",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "64"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "resnext50_32x4d_fpn.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "no"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-cpp-cpu-onnxruntime-vdefault-default_config": {
+ "retinanet": {
+ "Offline": {
+ "performance": "0.453",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.453",
+ "performance_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json
new file mode 100644
index 0000000000..9eabe5cb60
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json
@@ -0,0 +1,56 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_submission,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "resnet50",
+ "host_os": "windows",
+ "precision": "float32",
+ "implementation": "reference",
+ "backend": "onnxruntime",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "submitter": "CTuning",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "compliance": "no",
+ "j": true,
+ "time": true,
+ "clean": true,
+ "quiet": true,
+ "jf": "mlperf-inference-results",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_submission,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=retinanet",
+ "--precision=float32",
+ "--implementation=reference",
+ "--backend=onnxruntime",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--submitter=CTuning",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--compliance=no",
+ "--j",
+ "--time",
+ "--clean",
+ "--quiet",
+ "--jf=mlperf-inference-results"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json
new file mode 100644
index 0000000000..45eb699b96
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "df843c22cbf54aaf",
+ "compute_uid": "fe379ecd1e054a00",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-18",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json
new file mode 100644
index 0000000000..cae36b057d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json
@@ -0,0 +1,146 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.808629",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "963485100",
+ "max latency (ns)": "12366604800",
+ "mean latency (ns)": "5961694610",
+ "50.00 percentile latency (ns)": "6164791100",
+ "90.00 percentile latency (ns)": "12366604800",
+ "95.00 percentile latency (ns)": "12366604800",
+ "97.00 percentile latency (ns)": "12366604800",
+ "99.00 percentile latency (ns)": "12366604800",
+ "99.90 percentile latency (ns)": "12366604800",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "64"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "resnext50_32x4d_fpn.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "no"
+ },
+ "cm-mlperf-inference-results": {
+ "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": {
+ "retinanet": {
+ "Offline": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.808629",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "963485100",
+ "max latency (ns)": "12366604800",
+ "mean latency (ns)": "5961694610",
+ "50.00 percentile latency (ns)": "6164791100",
+ "90.00 percentile latency (ns)": "12366604800",
+ "95.00 percentile latency (ns)": "12366604800",
+ "97.00 percentile latency (ns)": "12366604800",
+ "99.00 percentile latency (ns)": "12366604800",
+ "99.90 percentile latency (ns)": "12366604800",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "64"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "resnext50_32x4d_fpn.onnx",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "no"
+ },
+ "cm-mlperf-inference-results": {
+ "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": {
+ "retinanet": {
+ "Offline": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.809",
+ "performance_valid": true,
+ "accuracy": "49.593",
+ "accuracy_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils",
+ "generate,mlperf,inference,submission"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md
new file mode 100644
index 0000000000..97635650c3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md
@@ -0,0 +1 @@
+TBD2
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json
new file mode 100644
index 0000000000..68cf51d221
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json
@@ -0,0 +1,55 @@
+{
+ "action": "run",
+ "automation": "script",
+ "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+ "division": "open",
+ "category": "edge",
+ "device": "cpu",
+ "model": "bert-99",
+ "precision": "float32",
+ "implementation": "reference",
+ "backend": "pytorch",
+ "scenario": "Offline",
+ "execution_mode": "test",
+ "power": "no",
+ "adr": {
+ "python": {
+ "version_min": "3.8"
+ }
+ },
+ "clean": true,
+ "compliance": "no",
+ "j": true,
+ "jf": "run-f05147815bf840b8",
+ "quiet": true,
+ "time": true,
+ "host_os": "linux",
+ "cmd": [
+ "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+ "--division=open",
+ "--category=edge",
+ "--device=cpu",
+ "--model=bert-99",
+ "--precision=float32",
+ "--implementation=reference",
+ "--backend=pytorch",
+ "--scenario=Offline",
+ "--execution_mode=test",
+ "--power=no",
+ "--adr.python.version_min=3.8",
+ "--clean",
+ "--compliance=no",
+ "--j",
+ "--jf=run-f05147815bf840b8",
+ "--quiet",
+ "--time",
+ "--host_os=linux"
+ ],
+ "out": "con",
+ "parsed_automation": [
+ [
+ "script",
+ "5b4e0237da074764"
+ ]
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json
new file mode 100644
index 0000000000..45eb699b96
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json
@@ -0,0 +1,9 @@
+{
+ "uid": "df843c22cbf54aaf",
+ "compute_uid": "fe379ecd1e054a00",
+ "bench_uid": "39877bb63fb54725",
+ "date_time": "2024-02-18",
+ "functional": true,
+ "reproduced": true,
+ "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json
new file mode 100644
index 0000000000..627e18889a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json
@@ -0,0 +1,137 @@
+{
+ "return": 0,
+ "env": {},
+ "new_env": {},
+ "state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.771384",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "1409122219",
+ "max latency (ns)": "12963712908",
+ "mean latency (ns)": "7203424157",
+ "50.00 percentile latency (ns)": "7862607410",
+ "90.00 percentile latency (ns)": "12963712908",
+ "95.00 percentile latency (ns)": "12963712908",
+ "97.00 percentile latency (ns)": "12963712908",
+ "99.00 percentile latency (ns)": "12963712908",
+ "99.90 percentile latency (ns)": "12963712908",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://zenodo.org/record/3733896/files/model.pytorch",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-cpu-pytorch-v2.1.0-default_config": {
+ "bert-99": {
+ "Offline": {
+ "performance": "0.771",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.771",
+ "performance_valid": true
+ }
+ },
+ "new_state": {
+ "app_mlperf_inference_log_summary": {
+ "sut name": "PySUT",
+ "scenario": "Offline",
+ "mode": "PerformanceOnly",
+ "samples per second": "0.771384",
+ "result is": "VALID",
+ "min duration satisfied": "Yes",
+ "min queries satisfied": "Yes",
+ "early stopping satisfied": "Yes",
+ "min latency (ns)": "1409122219",
+ "max latency (ns)": "12963712908",
+ "mean latency (ns)": "7203424157",
+ "50.00 percentile latency (ns)": "7862607410",
+ "90.00 percentile latency (ns)": "12963712908",
+ "95.00 percentile latency (ns)": "12963712908",
+ "97.00 percentile latency (ns)": "12963712908",
+ "99.00 percentile latency (ns)": "12963712908",
+ "99.90 percentile latency (ns)": "12963712908",
+ "samples_per_query": "10",
+ "target_qps": "1",
+ "target_latency (ns)": "0",
+ "max_async_queries": "1",
+ "min_duration (ms)": "0",
+ "max_duration (ms)": "0",
+ "min_query_count": "1",
+ "max_query_count": "10",
+ "qsl_rng_seed": "13281865557512327830",
+ "sample_index_rng_seed": "198141574272810017",
+ "schedule_rng_seed": "7575108116881280410",
+ "accuracy_log_rng_seed": "0",
+ "accuracy_log_probability": "0",
+ "accuracy_log_sampling_target": "0",
+ "print_timestamps": "0",
+ "performance_issue_unique": "0",
+ "performance_issue_same": "0",
+ "performance_issue_same_index": "0",
+ "performance_sample_count": "10833"
+ },
+ "app_mlperf_inference_measurements": {
+ "starting_weights_filename": "https://zenodo.org/record/3733896/files/model.pytorch",
+ "retraining": "no",
+ "input_data_types": "fp32",
+ "weight_data_types": "fp32",
+ "weight_transformations": "none"
+ },
+ "cm-mlperf-inference-results": {
+ "ip_172_31_89_56-reference-cpu-pytorch-v2.1.0-default_config": {
+ "bert-99": {
+ "Offline": {
+ "performance": "0.771",
+ "performance_valid": true
+ }
+ }
+ }
+ },
+ "cm-mlperf-inference-results-last": {
+ "performance": "0.771",
+ "performance_valid": true
+ }
+ },
+ "deps": [
+ "detect,os",
+ "detect,cpu",
+ "get,python3",
+ "get,mlcommons,inference,src",
+ "get,sut,description",
+ "get,mlperf,inference,results,dir",
+ "install,pip-package,for-cmind-python,_package.tabulate",
+ "get,mlperf,inference,utils"
+ ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
new file mode 100644
index 0000000000..50086d0862
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
@@ -0,0 +1,38 @@
+alias: benchmark-run-mlperf-inference-v4.0
+uid: b4ee9b6c820e493a
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- benchmark
+- run
+- mlperf
+- inference
+- v4.0
+
+name: "MLPerf inference - v4.0"
+
+supported_compute:
+- ee8c568e0ac44f2b
+- fe379ecd1e054a00
+
+bench_uid: 39877bb63fb54725
+
+view_dimensions:
+- - input.device
+ - "MLPerf device"
+- - input.implementation
+ - "MLPerf implementation"
+- - input.backend
+ - "MLPerf backend"
+- - input.model
+ - "MLPerf model"
+- - input.scenario
+ - "MLPerf scenario"
+- - input.host_os
+ - "Host OS"
+- - output.state.cm-mlperf-inference-results-last.performance
+ - "Got performance"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+ - "Got accuracy"
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml
new file mode 100644
index 0000000000..716adc20b3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml
@@ -0,0 +1,39 @@
+alias: benchmark-run-mlperf-inference-v4.1
+uid: b7e89771987d4168
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- benchmark
+- run
+- mlperf
+- inference
+- v4.1
+
+name: "MLPerf inference - v4.1"
+
+supported_compute:
+- ee8c568e0ac44f2b
+- fe379ecd1e054a00
+- d8f06040f7294319
+
+bench_uid: 39877bb63fb54725
+
+view_dimensions:
+- - input.device
+ - "MLPerf device"
+- - input.implementation
+ - "MLPerf implementation"
+- - input.backend
+ - "MLPerf backend"
+- - input.model
+ - "MLPerf model"
+- - input.scenario
+ - "MLPerf scenario"
+- - input.host_os
+ - "Host OS"
+- - output.state.cm-mlperf-inference-results-last.performance
+ - "Got performance"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+ - "Got accuracy"
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml
new file mode 100644
index 0000000000..d5d60a3857
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml
@@ -0,0 +1,10 @@
+alias: docker-basic-configurations
+uid: d2a0c5bb17664c93
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- docker
+- basic
+- configurations
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml
new file mode 100644
index 0000000000..238a1e57c2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml
@@ -0,0 +1,9 @@
+uid: 9960e9fb3cb24cb3
+
+name: "Basic ArchLinux"
+
+input:
+ docker_base_image: 'archlinux'
+ docker_os: arch
+ docker_os_version: 'latest'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml
new file mode 100644
index 0000000000..1fba915fa2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml
@@ -0,0 +1,9 @@
+uid: 27b4afcdd8e042e8
+
+name: "Basic RHEL 9"
+
+input:
+ docker_base_image: 'registry.access.redhat.com/ubi9'
+ docker_os: 'rhel'
+ docker_os_version: '9'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml
new file mode 100644
index 0000000000..35c0b48f93
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml
@@ -0,0 +1,9 @@
+uid: 59311e6098c14b21
+
+name: "Basic Ubuntu 20.04"
+
+input:
+ docker_base_image: 'ubuntu:20.04'
+ docker_os: ubuntu
+ docker_os_version: '20.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml
new file mode 100644
index 0000000000..371ba2b6af
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml
@@ -0,0 +1,9 @@
+uid: 614aa48d90724835
+
+name: "Basic Ubuntu 22.04"
+
+input:
+ docker_base_image: 'ubuntu:22.04'
+ docker_os: ubuntu
+ docker_os_version: '22.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml
new file mode 100644
index 0000000000..990d56cf5a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml
@@ -0,0 +1,9 @@
+uid: 276bd8ab39324f5f
+
+name: "Basic Ubuntu 23.04"
+
+input:
+ docker_base_image: 'ubuntu:23.04'
+ docker_os: ubuntu
+ docker_os_version: '23.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml
new file mode 100644
index 0000000000..d949d5519b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml
@@ -0,0 +1,9 @@
+uid: 12e86eb386314866
+
+name: "Basic Ubuntu 24.04"
+
+input:
+ docker_base_image: 'ubuntu:24.04'
+ docker_os: ubuntu
+ docker_os_version: '24.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml
new file mode 100644
index 0000000000..16107d8d5c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml
@@ -0,0 +1,11 @@
+uid: 854e65fb31584d63
+
+name: "Nvidia Ubuntu 20.04 CUDA 11.8 cuDNN 8.6.0 PyTorch 1.13.0 (pytorch:22.10)"
+
+ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-22-10.html
+
+input:
+ docker_base_image: 'nvcr.io/nvidia/pytorch:22.10-py3'
+ docker_os: ubuntu
+ docker_os_version: '20.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml
new file mode 100644
index 0000000000..66b9efd0d9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml
@@ -0,0 +1,11 @@
+uid: e0e7167139a74e36
+
+name: "Nvidia Ubuntu 22.04 CUDA 12.1 cuDNN 8.9.1 PyTorch 2.0.0 (pytorch:23.05)"
+
+ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-05.html
+
+input:
+ docker_base_image: 'nvcr.io/nvidia/pytorch:23.05-py3'
+ docker_os: ubuntu
+ docker_os_version: '22.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml
new file mode 100644
index 0000000000..38bcff6942
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml
@@ -0,0 +1,11 @@
+uid: 49fc51f2999b4545
+
+name: "Nvidia Ubuntu 22.04 CUDA 12.4 cuDNN 9.0.0 PyTorch 2.3.0 (pytorch:24.03)"
+
+ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-03.html
+
+input:
+ docker_base_image: 'nvcr.io/nvidia/pytorch:24.03-py3'
+ docker_os: ubuntu
+ docker_os_version: '22.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml
new file mode 100644
index 0000000000..b4e45d348f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml
@@ -0,0 +1,11 @@
+uid: 81879736ae5842f4
+
+name: "Nvidia Ubuntu 22.04 CUDA 12.5 cuDNN 9.1.0 PyTorch 2.4.0 (pytorch:24.06)"
+
+ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-06.html
+
+input:
+ docker_base_image: 'nvcr.io/nvidia/pytorch:24.06-py3'
+ docker_os: ubuntu
+ docker_os_version: '22.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml
new file mode 100644
index 0000000000..a9e2229ead
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml
@@ -0,0 +1,11 @@
+uid: 203a68df99d44137
+
+name: "Nvidia Ubuntu 22.04 CUDA 12.6 cuDNN 9.3.0 PyTorch 2.5.0 (pytorch:24.08)"
+
+ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html
+
+input:
+ docker_base_image: 'nvcr.io/nvidia/pytorch:24.08-py3'
+ docker_os: ubuntu
+ docker_os_version: '22.04'
+
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md
new file mode 100644
index 0000000000..8302f63d68
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md
@@ -0,0 +1,32 @@
+### Challenge
+
+Check past MLPerf inference results in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results)
+and add derived metrics such as results per number of cores, power efficiency, device cost, operational costs, etc.
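+
+For instance, a derived metric such as performance per core could be computed
+with a few lines of Python (a minimal sketch; the `performance`, `num_cores`
+and `power_watts` field names are illustrative assumptions, not the exact
+schema used in the repository):
+
+```python
+def add_derived_metrics(result: dict, system: dict) -> dict:
+    """Attach derived metrics to one benchmark result (hypothetical fields)."""
+    perf = float(result["performance"])        # e.g. samples per second
+    cores = int(system.get("num_cores", 1))    # hypothetical system descriptor
+    result["performance_per_core"] = perf / cores
+    if "power_watts" in system:                # optional power measurement
+        result["samples_per_joule"] = perf / float(system["power_watts"])
+    return result
+```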
+
+Add clock speed as a third dimension to graphs and improve the bar graph visualization.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
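+
+For example, the short submission runs recorded in this repository were produced
+with commands of the following form (a sketch; adjust the model, backend and
+device to your setup):
+
+```bash
+cm run script --tags=run-mlperf-inference,_r4.0,_submission,_short \
+    --division=open --category=edge --device=cpu --model=retinanet \
+    --precision=float32 --implementation=reference --backend=onnxruntime \
+    --scenario=Offline --execution_mode=test --quiet
+```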
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in the [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at the official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json
new file mode 100644
index 0000000000..cbdc212467
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json
@@ -0,0 +1,22 @@
+{
+ "alias": "add-derived-metrics-to-mlperf-inference",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close_extension": true,
+ "date_open": "20240204",
+ "points": 2,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "automate",
+ "derived-metrics",
+ "mlperf-inference",
+ "mlperf-inference-derived-metrics"
+ ],
+ "title": "Add derived metrics to MLPerf inference benchmarks (power efficiency, results / No of cores, costs, etc)",
+ "trophies": true,
+ "uid": "c65b56d7770946ee"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md
new file mode 100644
index 0000000000..a2059c0fe8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md
@@ -0,0 +1,4 @@
+20240220:
+* A prototype of a GUI to generate CM commands to run MLPerf inference benchmarks is ready: [link](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725)
+* A prototype of the infrastructure to reproduce MLPerf inference benchmark results is ready: [link](https://access.cknowledge.org/playground/?action=reproduce)
+* On-going efforts: https://github.com/mlcommons/ck/issues/1052
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml
new file mode 100644
index 0000000000..b8b519d27f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml
@@ -0,0 +1,21 @@
+alias: automate-mlperf-inference-v3.1-and-v4.0-2024
+uid: f89f152fc2614240
+
+automation_alias: challenge
+automation_uid: 3d84abd768f34e08
+
+title: Add MLCommons CM workflows and unified interface to automate MLPerf inference v3.1 and v4.0 benchmarks (Intel, Nvidia, Qualcomm, Arm64, TPU ...)
+
+date_open: '20231215'
+date_close: '20240315'
+
+hot: true
+
+tags:
+- automate
+- mlperf-inference-v3.1-and-v4.0
+- 2024
+
+experiments:
+- tags: mlperf-inference,v3.1
+- tags: mlperf-inference,v4.0
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md
new file mode 100644
index 0000000000..adfbea7263
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md
@@ -0,0 +1,10 @@
+This challenge is under preparation. You can read about the motivation behind this challenge in our [invited talk at MLPerf-Bench @ HPCA'24](https://doi.org/10.5281/zenodo.10786893).
+
+We plan to extend [MLCommons CM framework](https://github.com/mlcommons/ck)
+to automatically compose high-performance and cost-efficient AI systems
+based on MLPerf inference v4.0 results and [CM automation recipes](https://access.cknowledge.org/playground/?action=scripts).
+
+* A prototype of a GUI to generate CM commands to run MLPerf inference benchmarks is ready: [link](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725)
+* A prototype of the infrastructure to reproduce MLPerf inference benchmark results is ready: [link](https://access.cknowledge.org/playground/?action=reproduce)
+
+Contact the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) for more details.
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml
new file mode 100644
index 0000000000..b1d4fe9f18
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml
@@ -0,0 +1,25 @@
+alias: compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024
+uid: 7c983102d89e4869
+
+automation_alias: challenge
+automation_uid: 3d84abd768f34e08
+
+title: "Compose high-performance and cost-efficint AI systems using MLCommons' Collective Mind and MLPerf inference"
+
+date_open: '20240101'
+
+tags:
+- compose
+- ai
+- systems
+- mlperf-inference-v4.0
+- cm
+- mlcommons-cm
+- mlperf
+- v4.0
+- performance
+- energy
+- cost
+
+experiments:
+- tags: mlperf-inference,v4.0
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md
new file mode 100644
index 0000000000..306341271c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md
@@ -0,0 +1,30 @@
+### Challenge
+
+Connect CM workflows to run MLPerf inference benchmarks with [OpenBenchmarking.org](https://openbenchmarking.org).
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+
+
+
+### Organizers
+
+* Michael Larabel
+* Grigori Fursin
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+Results will be available at [OpenBenchmarking.org](https://openbenchmarking.org)
+and the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json
new file mode 100644
index 0000000000..c1e65aadbd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json
@@ -0,0 +1,22 @@
+{
+ "alias": "connect-mlperf-inference-v3.1-with-openbenchmarking",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_open": "20240101",
+ "date_close_extension": true,
+ "points": 2,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "automate",
+ "openbenchmarking",
+ "mlperf-inference",
+ "mlperf-inference-openbenchmarking"
+ ],
+ "title": "Run MLPerf inference benchmarks using CM via OpenBenchmarking.org",
+ "trophies": true,
+ "uid": "534592626eb44efe"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md
new file mode 100644
index 0000000000..f2f572bd48
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md
@@ -0,0 +1,23 @@
+### Challenge
+
+Evaluate models from the [MLCommons MedPerf platform](https://www.medperf.org) in terms of latency, throughput, power consumption and other metrics
+using MLPerf loadgen and MLCommons CM automation language.
+
+See the [Nature 2023 article about MedPerf](https://www.nature.com/articles/s42256-023-00652-2)
+and [ACM REP'23 keynote about CM](https://doi.org/10.5281/zenodo.8105339) to learn more about these projects.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+
+
+### Organizers
+
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json
new file mode 100644
index 0000000000..d48d0a9fea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json
@@ -0,0 +1,26 @@
+{
+ "alias": "connect-mlperf-with-medperf",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close_extension": true,
+ "date_open": "20240105",
+ "points": 2,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "automate",
+ "medperf",
+ "mlperf-inference",
+ "mlperf-inference-medperf",
+ "mlperf-inference-medperf",
+ "mlperf-inference-medperf-v3.1",
+ "mlperf-inference-medperf-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Connect MedPerf with MLPerf and CM",
+ "trophies": true,
+ "uid": "c26d1fbf89164728"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md
new file mode 100644
index 0000000000..62a4826ad2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md
@@ -0,0 +1,16 @@
+### CM tutorial
+
+https://github.com/mlcommons/ck/blob/master/docs/tutorials/scc23-mlperf-inference-bert.md
+
+### Challenge
+
+Reproduce and optimize MLPerf inference benchmarks during the Student Cluster Competition at SuperComputing'23.
+
+See our [related challenge from 2022](https://access.cknowledge.org/playground/?action=challenges&name=repro-mlperf-inference-retinanet-scc2022).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge.org](https://cKnowledge.org)
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json
new file mode 100644
index 0000000000..021872b15a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json
@@ -0,0 +1,20 @@
+{
+ "alias": "optimize-mlperf-inference-scc2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20231115",
+ "date_open": "20230915",
+ "tags": [
+ "automate",
+ "modularize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-scc",
+ "mlperf-inference-scc-2023"
+ ],
+ "title": "Reproduce and optimize MLPerf inference v3.1 benchmarks at the Student Cluster Competition'23 at SuperComputing'23 using CM",
+ "uid": "ddaf594f84b14bc2"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md
new file mode 100644
index 0000000000..1f9be23af2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md
@@ -0,0 +1,7 @@
+[MLCommons](https://mlcommons.org), the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org)
+are preparing a unified interface to run the MLPerf inference benchmark at the Student Cluster Competition'24.
+
+See [the CM-MLPerf tutorial for SCC'23](https://github.com/mlcommons/ck/blob/master/docs/tutorials/scc23-mlperf-inference-bert.md).
+Note that the MLPerf model will change in SCC'24 - please stay tuned for more details!
+
+See https://sc24.supercomputing.org/students/student-cluster-competition for more details about SCC.
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json
new file mode 100644
index 0000000000..ab75aa27a6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json
@@ -0,0 +1,19 @@
+{
+ "alias": "optimize-mlperf-inference-scc2024",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_open": "20241001",
+ "tags": [
+ "automate",
+ "modularize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-scc",
+ "mlperf-inference-scc-2024"
+ ],
+ "title": "Run and optimize the MLPerf inference benchmark using CM at the Student Cluster Competition'24 at SuperComputing'24",
+ "uid": "f7fcba4c43ab4412"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md
new file mode 100644
index 0000000000..d0ac7cf15b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md
@@ -0,0 +1,18 @@
+### Challenge
+
+Prepare, optimize and reproduce MLPerf inference v2.1 benchmarks across diverse implementations, software and hardware
+using the [MLCommons CK framework](https://github.com/mlcommons/ck).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [OctoML](https://octoml.ai)
+
+### Status
+
+This challenge has been successfully completed.
+
+### Results
+
+Results are available [here](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,v2.1).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json
new file mode 100644
index 0000000000..31cb5dffd2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v2.1-2022",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20220901",
+ "date_open": "20220701",
+ "experiments": [
+ {
+ "tags": "mlperf-inference,v2.1"
+ }
+ ],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-v2.1",
+ "mlperf-inference-v2.1-2022",
+ "v2.1"
+ ],
+ "title": "Run and optimize MLPerf inference v2.1 benchmarks",
+ "uid": "2e13154b7fbb412d"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md
new file mode 100644
index 0000000000..da6decc8c7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md
@@ -0,0 +1,74 @@
+### Challenge
+
+Run MLPerf inference v3.0 benchmarks out-of-the-box across diverse implementations, software and hardware
+using the [MLCommons CM automation language](https://github.com/mlcommons/ck)
+and submit public results to the MLPerf inference v3.0 submission round via the [cTuning foundation](https://cTuning.org).
+
+* [GUI to run MLPerf inference benchmarks](https://cknowledge.org/mlperf-inference-gui)
+* [GUI to prepare MLPerf inference submissions](https://cknowledge.org/mlperf-inference-submission-gui)
+
+### Organizers
+
+* [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge](https://cKnowledge.org)
+
+### Status
+
+This challenge has been successfully completed.
+
+### Results
+
+Official results:
+* https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/cTuning
+* https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning
+
+Results in the MLCommons CK/CM format:
+* https://github.com/mlcommons/cm4mlperf-results
+
+Visualization and comparison with derived metrics:
+* [MLCommons Collective Knowledge Playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,v3.0).
+
+### The outcome
+
+We are very pleased to announce the successful outcome of the 1st
+community challenge to run, reproduce and optimize MLPerf inference v3.0
+benchmarks: our MLCommons CK/CM workflow automation framework has helped
+to prepare more than 80% of all submission results including 98% of power
+results with very diverse technology and benchmark implementations from
+Neural Magic, Qualcomm, cKnowledge Ltd, KRAI, cTuning foundation, Dell
+Technologies, Hewlett Packard Enterprise, Lenovo, Hugging Face, NVIDIA,
+Intel Corporation, AMD and Apple across diverse CPUs, GPUs and DSPs with
+PyTorch, ONNX, QAIC, TF/TFLite, TVM and TensorRT using popular cloud
+providers (GCP, AWS, Azure) and individual servers and edge devices
+provided by our [volunteers](https://access.cknowledge.org/playground/?action=contributors).
+
+You can now see and compare all MLPerf inference results v3.0, v2.1 and
+v2.0 online together with reproducibility reports including the
+[MLPerf BERT model](https://huggingface.co/ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1)
+from the [Hugging Face Zoo](https://www.linkedin.com/company/huggingface/?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D)
+on [Nvidia Jetson Orin platform](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md#reproducing-the-nvidia-jetson-agx-orin-submission).
+You can even create your own derived metrics (such as performance per Watt),
+provide your own constraints using this [MLCommons repository](https://github.com/mlcommons/cm_inference_results) and visualize
+them as shown in [this example](https://access.cknowledge.org/playground/?action=experiments&name=e472410ee67c41f9&x=Result&y=Power_Efficiency&filter=result[%27Result_Power%27]%3C35&derived_metrics=result%5B%27Power_Efficiency%27%5D%3D1000%2Fresult%5B%27Result_Power%27%5D&c=accelerator_model_name&axis_key_s=version).
+
+Additional thanks to [Michael Goin](https://www.linkedin.com/in/michael-goin)
+from [Neural Magic](https://www.linkedin.com/company/neural-magic/?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D), our international
+students including [Himanshu Dutta](https://www.linkedin.com/in/ACoAACpPCiMB7zUNStsqBmaOCtd100a7wXBGu_M?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D),
+[Aditya Kumar Shaw](https://www.linkedin.com/in/ACoAACJ3ikUBjuHqi35ibm8CG6IEYv-v_VsobIs?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D),
+Sachin Mudaliyar, [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189),
+and all [CK/CM users and contributors](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md) for helping us to
+validate, use and improve this open-source technology to automate
+benchmarking and optimization of AI/ML systems in terms of performance,
+accuracy, power and costs! We are also grateful to [HiPEAC](https://www.linkedin.com/company/hipeac)
+and [OctoML](https://www.linkedin.com/company/octoml) for
+sponsoring initial development and Peter Mattson, David Kanter, Vijay
+Janapa Reddi and Alexandros Karargyris for fruitful discussions.
+
+
+### Dissemination
+
+* [Forbes article](https://www.forbes.com/sites/karlfreund/2023/04/05/nvidia-performance-trounces-all-competitors-who-have-the-guts-to-submit-to-mlperf-inference-30/?sh=3c38d2866676)
+* [ZDNet article](https://www.zdnet.com/article/nvidia-dell-qualcomm-speed-up-ai-results-in-latest-benchmark-tests)
+* [LinkedIn article from Grigori Fursin (MLCommons Task Force co-chair)](https://www.linkedin.com/pulse/announcing-my-new-project-reproducible-optimization-co-design-fursin)
+* [LinkedIn article from Arjun Suresh (MLCommons Task Force co-chair)](https://www.linkedin.com/posts/arjunsuresh_nvidia-performance-trounces-all-competitors-activity-7049500972275929088-nnnx?utm_source=share&utm_medium=member_desktop)
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json
new file mode 100644
index 0000000000..0baf3cfeea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.0-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230301",
+ "date_open": "20230201",
+ "experiments": [
+ {
+ "tags": "mlperf-inference,v3.0"
+ }
+ ],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-v3.0",
+ "mlperf-inference-v3.0-2023",
+ "v3.0"
+ ],
+ "title": "Run and optimize MLPerf inference v3.0 benchmarks",
+ "uid": "57cbc3384d7640f9"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md
new file mode 100644
index 0000000000..f6a17979ca
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md
@@ -0,0 +1,281 @@
+# Crowd-benchmarking MLPerf BERT inference
+
+
+Click here to see the table of contents.
+
+* [Crowd-benchmarking MLPerf BERT inference](#crowd-benchmarking-mlperf-bert-inference)
+* [System preparation](#system-preparation)
+ * [Minimal system requirements](#minimal-system-requirements)
+ * [Install CM (CK2) automation meta-framework](#install-cm-ck2-automation-meta-framework)
+ * [Pull CM repository with portable automation recipes](#pull-cm-repository-with-portable-automation-recipes)
+ * [Detect or install CUDA](#detect-or-install-cuda)
+ * [Test CUDA installation](#test-cuda-installation)
+ * [Install Python virtual environment](#install-python-virtual-environment)
+ * [Detect or install cuDNN](#detect-or-install-cudnn)
+ * [Detect or install TensorRT](#detect-or-install-tensorrt)
+ * [Run MLPerf inference benchmark with BERT](#run-mlperf-inference-benchmark-with-bert)
+ * [Try ONNX runtime backend](#try-onnx-runtime-backend)
+ * [Do a test run to detect and record the system performance](#do-a-test-run-to-detect-and-record-the-system-performance)
+ * [Do a full accuracy run for all the scenarios](#do-a-full-accuracy-run-for-all-the-scenarios)
+ * [Do a full performance run for all the scenarios](#do-a-full-performance-run-for-all-the-scenarios)
+ * [Populate the README files](#populate-the-readme-files)
+ * [Generate MLPerf submission tree](#generate-mlperf-submission-tree)
+ * [Push the results to GitHub repo](#push-the-results-to-github-repo)
+ * [Try PyTorch backend](#try-pytorch-backend)
+ * [Test composable ML benchmark with other models, data sets, frameworks and platforms](#test-composable-ml-benchmark-with-other-models-data-sets-frameworks-and-platforms)
+* [The next steps](#the-next-steps)
+
+This is a pilot community project to collaboratively run the MLPerf BERT inference benchmark
+across diverse platforms provided by volunteers, similar to [SETI@home](https://setiathome.berkeley.edu/).
+However, instead of searching for extraterrestrial intelligence, we are
+searching for the optimal software/hardware combinations to run various AI and ML workloads
+in terms of performance, accuracy, power and costs.
+
+This benchmark is composed of [portable and reusable automation recipes](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md)
+developed by [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+to modularize complex AI and ML Systems and automate their benchmarking, design space exploration, optimization and deployment
+across continuously evolving software, hardware, models and data.
+
+*If you submit your results before 1pm PST on Friday 3rd, 2023,
+ they will be accepted for the official MLPerf inference v3.0 submission round
+ and your name will be acknowledged in the notes!*
+
+
+# System preparation
+
+## Minimal system requirements
+
+* CPU: any x86-64 or Arm64 based machine
+* GPU: any relatively modern Nvidia GPU with 8GB+ memory and CUDA 11.4+
+* OS: we have tested this automation on Ubuntu 20.04, Ubuntu 22.04 and Debian 10
+* Disk space: ~10GB
+* Python: 3.8+
+* All other dependencies (artifacts and tools) will be installed by the CM meta-framework (aka CK2)
+
+## Install CM (CK2) automation meta-framework
+
+Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install the [MLCommons CM framework](https://github.com/mlcommons/ck)
+(the 2nd generation of the Collective Knowledge framework) on your system.
+
+## Pull CM repository with portable automation recipes
+
+Pull the MLCommons CM repository with [cross-platform CM scripts](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md)
+supporting portable MLOps and DevOps:
+
+```bash
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+CM pulls all such repositories into the `$HOME/CM` directory to search for portable CM automation recipes and artifacts.
+
+We use the unified CM CLI & Python API of [portable and reusable CM scripts](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md)
+to compose portable automation pipelines (also implemented as CM scripts) that can automatically detect or install all necessary artifacts (tools, models, datasets, libraries, etc)
+required to run a given software project such as the MLPerf inference benchmark.
+
+These CM scripts wrap existing native scripts and tools as simple micro-services
+with a human-readable CLI and a simple Python API, so that they can easily be connected
+together and run on any platform in a unified way.
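+
+For example, the following commands, used later in this tutorial, have exactly the same shape
+and only differ in their tags; `--out=json` is a minimal sketch of how the JSON output of one
+script can be inspected or consumed by other tools:
+
+```bash
+# Detect or install system utilities via a CM script
+cm run script "get sys-utils-cm" --quiet
+
+# Detect or install CUDA and print the resulting state as JSON
+cm run script "get cuda" --out=json
+```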
+
+## Detect or install CUDA
+
+Run the following CM script:
+```bash
+cm run script "get cuda" --out=json
+```
+
+If CUDA is automatically detected, it will be registered in the CM cache:
+```bash
+cm show cache --tags=get,cuda
+```
+
+Otherwise, this script will attempt to download and install the latest CUDA
+from the Nvidia website.
+
+Please report any issue with CM scripts [here](https://github.com/mlcommons/ck/issues).
+
+### Test CUDA installation
+
+You can test whether the CUDA toolkit and driver were detected or installed successfully using the following command:
+```bash
+cm run script "get cuda-devices"
+```
+
+You should see output similar to the following:
+```txt
+Checking compiler version ...
+
+nvcc: NVIDIA (R) Cuda compiler driver
+Copyright (c) 2005-2022 NVIDIA Corporation
+Built on Wed_Sep_21_10:33:58_PDT_2022
+Cuda compilation tools, release 11.8, V11.8.89
+Build cuda_11.8.r11.8/compiler.31833905_0
+
+Compiling program ...
+
+Running program ...
+
+ - Running postprocess ...
+GPU Device ID: 0
+GPU Name: Tesla K80
+GPU compute capability: 3.7
+CUDA driver version: 11.4
+CUDA runtime version: 11.8
+Global memory: 11997020160
+Max clock rate: 823.500000 MHz
+Total amount of shared memory per block: 49152
+Total number of registers available per block: 65536
+Warp size: 32
+Maximum number of threads per multiprocessor: 2048
+Maximum number of threads per block: 1024
+Max dimension size of a thread block X: 1024
+Max dimension size of a thread block Y: 1024
+Max dimension size of a thread block Z: 64
+Max dimension size of a grid size X: 2147483647
+Max dimension size of a grid size Y: 65535
+Max dimension size of a grid size Z: 65535
+
+ - running time of script "get,cuda-devices": 4.16 sec.
+
+```
+
+## Install Python virtual environment
+
+```bash
+cm run script "get sys-utils-cm" --quiet
+
+cm run script "install python-venv" --name=mlperf-cuda
+```
+
+If you want to install a specific version of Python, use the following command:
+```bash
+cm run script "install python-venv" --version=3.10.8 --name=mlperf-cuda
+```
+
+## Detect or install cuDNN
+
+```bash
+cm run script "get cudnn"
+```
+
+If cuDNN is not detected on your system, you can download a TAR file
+from the [Nvidia website](https://developer.nvidia.com/cudnn) and then use the same CM script
+to install it as follows:
+```bash
+cm run script "get cudnn" --tar_file=
+```
+
+We have tested this project with the following tar file `cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz`.
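+
+For example, a minimal sketch assuming the tested tar file above was downloaded to the current directory:
+
+```bash
+# Register the manually downloaded cuDNN archive with CM
+cm run script "get cudnn" --tar_file=cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz
+```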
+
+## Detect or install TensorRT
+
+```bash
+cm run script "get tensorrt"
+```
+If TensorRT is not detected on your system, you can download a TAR file
+from the [Nvidia website](https://developer.nvidia.com/tensorrt) and then use the same CM script
+to install it as follows:
+```bash
+cm run script "get tensorrt" --tar_file=
+```
+
+We have tested this project with the following tar file `TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz`.
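+
+For example, a minimal sketch assuming the tested tar file above was downloaded to the current directory:
+
+```bash
+# Register the manually downloaded TensorRT archive with CM
+cm run script "get tensorrt" --tar_file=TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz
+```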
+
+
+## Run MLPerf inference benchmark with BERT
+
+### Try ONNX runtime backend
+
+#### Do a test run to detect and record the system performance
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+ --adr.python.name=mlperf-cuda --model=bert-99 --implementation=reference \
+ --device=cuda --backend=onnxruntime --quiet
+```
+
+#### Do a full accuracy run for all the scenarios
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+ --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \
+ --implementation=reference --backend=onnxruntime --quiet \
+ --execution-mode=valid --results_dir=$HOME/inference_3.0_results
+```
+
+#### Do a full performance run for all the scenarios
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_performance-only,_all-scenarios \
+ --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \
+ --implementation=reference --backend=onnxruntime --quiet \
+ --execution-mode=valid --results_dir=$HOME/inference_3.0_results
+```
+
+#### Populate the README files
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+ --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \
+ --implementation=reference --backend=onnxruntime --quiet \
+ --execution-mode=valid --results_dir=$HOME/inference_3.0_results
+```
+
+#### Generate MLPerf submission tree
+
+We should use the master branch of the MLCommons inference repo for the submission checker.
+You can use the `--hw_notes_extra` option to add your name to the notes.
+
+```bash
+cm run script --tags=generate,inference,submission \
+ --results_dir=$HOME/inference_3.0_results/valid_results \
+ --adr.python.name=mlperf-cuda \
+ --device=cuda --submission_dir=$HOME/inference_submission_tree --clean \
+ --run-checker --submitter=cTuning --adr.inference-src.version=master \
+ --hw_notes_extra="Result taken by " --quiet
+```
+
+#### Push the results to GitHub repo
+
+First create a fork of [this GitHub repo with aggregated results](https://github.com/ctuning/mlperf_inference_submissions_v3.0).
+Then run the following command after replacing `--repo_url` with your fork URL.
+
+```bash
+cm run script --tags=push,github,mlperf,inference,submission \
+ --submission_dir=$HOME/inference_submission_tree \
+ --adr.python.name=mlperf-cuda \
+ --repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.0 \
+ --commit_message="Bert crowd-results added"
+```
+
+Create a PR to the [GitHub repo with aggregated results](https://github.com/ctuning/mlperf_inference_submissions_v3.0/).
+
+
+
+### Try PyTorch backend
+
+You can run the same commands with PyTorch by rerunning all the above commands and replacing `--backend=onnxruntime` with `--backend=pytorch`.
+
+For example,
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+ --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \
+ --implementation=reference --backend=pytorch --execution-mode=valid \
+ --results_dir=$HOME/inference_3.0_results --quiet
+```
+
+
+## Test composable ML benchmark with other models, data sets, frameworks and platforms
+
+* [GUI to prepare CM command line and run benchmark](https://cknowledge.org/mlperf-inference-gui)
+* [GUI to compare performance, accuracy, power and costs of ML/SW/HW combinations](https://cKnowledge.org/cm-gui-graph)
+
+
+# The next steps
+
+Please follow the [cTuning foundation](https://cTuning.org), [cKnowledge.org](https://cKnowledge.org)
+and [MLCommons](https://mlcommons.org).
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md
new file mode 100644
index 0000000000..38f69a5d53
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md
@@ -0,0 +1,59 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the CK repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+3d-unet has two variants - `3d-unet-99` and `3d-unet-99.9` - where `99` and `99.9` specify the required accuracy constraint with respect to the reference floating-point model. Both models can be submitted under the edge as well as the datacenter category.
+
+Since 3d-unet is one of the slowest running models, we only run it using the `nvidia-original` implementation, where the model is quantized and runs on the TensorRT backend on an Nvidia GPU.
+
+For `3d-unet-99.9` runs, simply replace `3d-unet-99` with `3d-unet-99.9`.
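+
+For example, a minimal sketch of the test run from the section below with only the model name substituted:
+
+```
+# Test run for the higher-accuracy 3d-unet variant
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=3d-unet-99.9 --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```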
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=3d-unet-99 --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md
new file mode 100644
index 0000000000..8aebb068f0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md
@@ -0,0 +1,80 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the CK repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+Bert has two variants - `bert-99` and `bert-99.9` - where `99` and `99.9` specify the required accuracy constraint with respect to the reference floating-point model. The `bert-99.9` model is applicable only to a datacenter system.
+
+In the edge category, `bert-99` has Offline and SingleStream scenarios; in the datacenter category, both `bert-99` and `bert-99.9` have Offline and Server scenarios. The commands below assume an edge category system.
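+
+For a datacenter category system, a minimal sketch of the test run from the next section
+would substitute the model and category as follows:
+
+```
+# Test run for bert-99.9 in the datacenter category
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=bert-99.9 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=datacenter --division=open --quiet
+```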
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=bert-99 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power (see the sketch after this list). It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
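+
+A minimal sketch of the same valid run with power measurement enabled (this assumes a power
+analyzer is already set up for your system):
+
+```
+# Same command as above with power measurement turned on
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --power=yes --quiet
+```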
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend
+
+The same commands as for `onnxruntime` should work by replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=tf --execution-mode=valid \
+--results_dir=$HOME/inference_3.0_results --quiet
+```
+
+## Pytorch backend
+
+The same commands as for `onnxruntime` should work by replacing `--backend=onnxruntime` with `--backend=pytorch`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results --quiet
+```
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md
new file mode 100644
index 0000000000..6d6ba275fd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md
@@ -0,0 +1,82 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the CK repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+In the edge category, ResNet50 has Offline, SingleStream and MultiStream scenarios; in the datacenter category, it has Offline and Server scenarios. The commands below assume an edge category system.
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios --model=resnet50 \
+--device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers (see the sketch after this list)
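+
+A minimal sketch of the same run with override flags added (the numbers below are
+placeholders for illustration, not measured targets):
+
+```
+# Same command as above with hypothetical performance targets
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios --model=resnet50 \
+--device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --offline_target_qps=100 --singlestream_target_latency=10 --quiet
+```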
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--submission_dir=$HOME/inference_submission_tree --clean \
+--run-checker --submitter=cTuning --adr.inference-src.version=master \
+--hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend
+
+The same commands as for `onnxruntime` should work by replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tf \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+## TVM backend
+
+The same commands as for `onnxruntime` should work by replacing `--backend=onnxruntime` with `--backend=tvm-onnx` (only `--device=cpu` is currently supported for TVM). For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tvm-onnx \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md
new file mode 100644
index 0000000000..4eedba9f31
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md
@@ -0,0 +1,67 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the CK repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=retinanet --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers (see the sketch after this list)
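+
+A minimal sketch of the same run overriding the MultiStream target latency (the value below
+is a placeholder for illustration, not a measured target):
+
+```
+# Same command as above with a hypothetical MultiStream target latency
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --multistream_target_latency=80 --quiet
+```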
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Pytorch backend
+
+The same commands as for `onnxruntime` should work by replacing `--backend=onnxruntime` with `--backend=pytorch`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md
new file mode 100644
index 0000000000..d7191c808d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md
@@ -0,0 +1,53 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the CK repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=rnnt --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md
new file mode 100644
index 0000000000..e1691c21ac
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md
@@ -0,0 +1,48 @@
+The instructions below are for creating an AWS instance from the CLI. You can also create an instance via the web interface and set up CM on it.
+
+## Prerequisites
+
+1. AWS key, secret and token
+2. A `*.pem` SSH key file to be used to create the instance (its public key will be copied to the `$HOME/.ssh/authorized_keys` file of the created instance)
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+### Update Access Details
+
+```
+cd $HOME/CM/repos/mlcommons@ck/cm-mlops/script/run-terraform/aws/
+cp credentials.example credentials.sh
+```
+Update `credentials.sh` with your AWS key, secret and token.
+
+### Create an AWS Instance
+
+
+```
+cm run script --tags=run,terraform,_m7g.xlarge,_storage_size.500,_ubuntu.2204,_us-west-2 \
+--cminit --key_file=$HOME/cmuser.pem
+```
+
+The above command will output the IP address of the created instance, which will already have CM set up.
+
+The `_m7g.xlarge,_storage_size.500,_ubuntu.2204` variations can be changed to launch a different instance (see the sketch after this list). Below are the variation combinations we used for MLPerf inference 3.0 submissions.
+
+* `_g4dn.xlarge`
+* `_a1.2xlarge,_storage_size.130,_ubuntu.2204`
+* `_c5.4xlarge,_storage_size.130,_ubuntu.2204`
+* `_m7g.2xlarge,_storage_size.500,_ubuntu.2204`
+* `_inf1.2xlarge,_storage_size.500,_amazon-linux-2-kernel.510`
+* `_t2.medium,_storage_size.200,_rhel.9`
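+
+For example, a minimal sketch for launching the GPU instance from the list above (this
+assumes the same region and key file as in the command above):
+
+```
+cm run script --tags=run,terraform,_g4dn.xlarge,_us-west-2 \
+--cminit --key_file=$HOME/cmuser.pem
+```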
+
+### Copy the needed files from the local machine
+
+Copy the ImageNet dataset to the created instance. For example,
+
+```
+rsync -avz -e 'ssh -i $HOME/cmuser.pem' $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files need to be downloaded locally from the Nvidia website and copied to the AWS instance similar to the above command.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md
new file mode 100644
index 0000000000..6bd16556a3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md
@@ -0,0 +1,35 @@
+The instructions below are for creating a Google Cloud instance from the CLI. You can also create an instance via the web interface and set up CM on it.
+
+## Prerequisites
+
+Please follow the authentication instructions given [here](https://github.com/ctuning/mlcommons-ck/blob/master/cm-mlops/script/run-terraform/README-about.md).
+
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+
+### Create a GCP Instance
+
+
+```
+cm run script --tags=run,terraform,_gcp,_n1-highmem.4,_gcp_project.mlperf-inference-tests --cminit
+```
+
+The above command will output the IP address of the created instance, which will already have CM set up.
+
+The `_n1-highmem.4` variation can be changed to launch a different instance (see the sketch after this list). Below are the variation combinations we used for MLPerf inference 3.0 submissions.
+
+* `_n1-standard.4`
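+
+For example, a minimal sketch for launching the alternative variation above (this assumes
+the same GCP project as in the command above):
+
+```
+cm run script --tags=run,terraform,_gcp,_n1-standard.4,_gcp_project.mlperf-inference-tests --cminit
+```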
+
+### Copy the needed files
+
+Copy the ImageNet dataset to the created instance. For example,
+
+```
+rsync -avz -e 'ssh -i $HOME/cmuser.pem' $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files need to be downloaded locally from the Nvidia website and copied to the GCP instance similar to the above command.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md
new file mode 100644
index 0000000000..68db00ea0e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md
@@ -0,0 +1,53 @@
+## Setup
+We used an Nvidia Jetson AGX Orin developer kit with 32GB RAM and 64GB eMMC. We also connected a 500GB SSD disk via USB and used a WiFi connection for internet connectivity.
+
+We used the out-of-the-box developer kit image, which was running Ubuntu 20.04 and JetPack 5.0.1 Developer Preview (L4T 34.1.1) with CUDA 11.4. We were also using the default 4k page size (Nvidia recommends 64k for MLPerf inference).
+
+[cuDNN 8.6.0](https://developer.nvidia.com/compute/cudnn/secure/8.6.0/local_installers/11.8/cudnn-local-repo-ubuntu2004-8.6.0.163_1.0-1_arm64.deb) and [TensorRT 8.5.2.2](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.5.3/local_repos/nv-tensorrt-local-repo-ubuntu2004-8.5.3-cuda-11.8_1.0-1_arm64.deb) were downloaded as Debian packages on a host machine, copied over to Nvidia Jetson Orin and installed.
+
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset.
+
+### Copy the needed files from a host machine
+
+Copy the ImageNet dataset to the Orin device. For example,
+
+```
+rsync -avz $HOME/imagenet-2012-val/ user@192.168.0.27:
+```
+
+Log in to the Orin and register the ImageNet dataset:
+```
+cm run script --tags=get,imagenet,dataset,_2012,_full --input=$HOME/imagenet-2012-val
+```
+
+Once all the required files are copied over, follow the individual benchmark instructions from the README files given [here](./). All the required dependencies should be resolved by CM.
+
+### Power Measurement Setup
+
+We measured power in the peak performance mode (MaxN), except for one SUT where the energy efficiency mode was changed to Max15. Our aim was to showcase the out-of-the-box performance of the Nvidia Jetson AGX Orin, including its power usage.
+
+## Reproducing the Nvidia Jetson AGX Orin Submission
+
+After our submission, we followed the instructions from Nvidia in the inference v3.0 repository and tried to reproduce Nvidia's numbers. For MaxN mode, we were able to match Nvidia's numbers using the same versions of CUDA, cuDNN and TensorRT, but outside of Docker. For MaxQ mode, we could get the same performance as Nvidia, but our power usage was about 5W higher.
+
+### Performance results MaxN
+
+The table below shows the performance comparison of our results under different settings and the Nvidia submission for MLPerf inference 3.0. We will be updating our instructions for easier reproducibility of these numbers, including CM scripts for flashing the L4T image and rebuilding the kernel for a 64k page size.
+
+
+| Workload | Results | L4T | PAGESIZE | Power Mode | FAN Dynamic Speed control | Offline Accuracy | Offline Performance | SingleStream Accuracy | SingleStream Performance | MultiStream Accuracy | MultiStream Performance |
+| --------- | --------------------------------- | ----- | -------- | ---------- | ------------------------- | ---------------- | ------------------- | --------------------- | ------------------------ | -------------------- | ----------------------- |
+| ResNet50 | Nvidia Submitted (docker) | r35.3 | 64k | MaxN | active | 75.934 | 6438.1 | 76.032 | 0.633479 | 76.032 | 2.187731 |
+| ResNet50 | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 75.934 | 4697 | 76.032 | 0.72 | 76.032 | 2.57 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 75.85 | 6172 | 76.056 | 0.644 | 76.056 | 2.074 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 75.85 | 6430 | 76.056 | 0.659 | 76.056 | 2.20 |
+| RetinaNet | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 37.372 | 92.4048 | 37.403 | 13.924457 | 37.519 | 104.680313 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 37.346 | 80.0854 (no DLA) | 37.350 | 14.19 | 37.409 | 105.344828 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 37.345 | 94.6886 | 37.340 | 14.073 | 37.488 | 103.8 |
+| BERT | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 90.552 | 544.243 | 90.344 | 5.635431 | NA | NA |
+| BERT | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 90.552 | 449.96 | 90.344 | 7.8 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 90.562 | 527 (128 batchsize) | 90.311 | 6.636 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 90.552 | 539 | 90.344 | 6.31 | NA | NA |
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md
new file mode 100644
index 0000000000..6362f3eb66
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md
@@ -0,0 +1,83 @@
+### Introduction
+
+Our goal is to help the community benchmark and optimize various AI/ML applications
+across diverse software and hardware provided by volunteers, similar to SETI@home!
+
+Open-source [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+were developed by a [consortium of 50+ companies and universities (MLCommons)](https://mlcommons.org)
+to enable trustable and reproducible comparison of AI/ML systems
+in terms of latency, throughput, power consumption, accuracy and other metrics
+across diverse software/hardware stacks from different vendors.
+
+However, running MLPerf inference benchmarks and submitting results [turned out to be a challenge](https://doi.org/10.5281/zenodo.8144274)
+even for experts and could easily take many weeks to prepare. That's why [MLCommons](https://mlcommons.org),
+[cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+decided to develop an open-source, technology-agnostic
+and non-intrusive [Collective Mind automation language (CM)](https://github.com/mlcommons/ck)
+and [Collective Knowledge Playground (CK)](https://access.cknowledge.org/playground/?action=experiments)
+to help anyone run, reproduce, optimize and compare MLPerf inference benchmarks out-of-the-box
+across diverse software, hardware, models and data sets.
+
+You can read more about our vision, open-source technology and future plans
+in this [presentation](https://doi.org/10.5281/zenodo.8105339).
+
+
+
+### Advanced challenge
+
+We would like to ask volunteers to run various MLPerf inference benchmarks
+on diverse CPUs (Intel, AMD, Arm) and Nvidia GPUs, similar to SETI@home,
+across different frameworks (ONNX, PyTorch, TF, TFLite),
+either natively or in a cloud (AWS, Azure, GCP, Alibaba, Oracle, OVHcloud, ...),
+and submit results to MLPerf inference v3.1.
+
+However, since some benchmarks may take 1-2 days to run, we suggest starting in the following order (these links describe the CM commands to run the benchmarks and submit results):
+* [CPU: Reference implementation of Image Classification with ResNet50 (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/resnet50/README_reference.md)
+* [CPU: TFLite C++ implementation of Image classification with variations of MobileNets and EfficientNets (open division)](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/run-mlperf-inference-mobilenet-models/README-about.md)
+* [Nvidia GPU: Nvidia optimized implementation of Image Classification with ResNet50 (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/resnet50/README_nvidia.md)
+* [Nvidia GPU: Nvidia optimized implementation of Language processing with BERT large (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/bert/README_nvidia.md)
+* [Nvidia GPU: Reference implementation of Image Classification with ResNet50 (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/bert/README_nvidia.md)
+* [Nvidia GPU: Reference implementation of Language processing with BERT large (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/resnet50/README_reference.md)
+* [Nvidia GPU (24GB of memory min): Reference implementation of Language processing with GPT-J 6B (open)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/gpt-j/README_reference.md)
+* [Nvidia GPU: Nvidia optimized implementation of all other models (open and closed division)](https://github.com/ctuning/mlcommons-ck/blob/master/docs/mlperf/inference/README.md#run-benchmarks-and-submit-results)
+
+Please read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to set up and run the above benchmarks using CM.
+
+You can register your participation for the [Collective Knowledge leaderboard](https://access.cKnowledge.org/playground/?action=contributors)
+using this [guide](https://github.com/mlcommons/ck/blob/master/platform/register.md).
+
+Please report encountered problems using [GitHub issues](https://github.com/mlcommons/ck/issues)
+to help the community
+improve the portability of the CM automation for MLPerf and other benchmarks and projects.
+
+Looking forward to your submissions and happy hacking!
+
+
+
+### Prizes
+
+* *All submitters will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All submitters will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Status
+
+You can see shared results in [this repository](https://github.com/ctuning/mlperf_inference_submissions_v3.1)
+with PRs from participants [here](https://github.com/ctuning/mlperf_inference_submissions_v3.1/pulls).
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json
new file mode 100644
index 0000000000..a30c26c928
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json
@@ -0,0 +1,26 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_open": "20230704",
+ "experiments": [],
+ "points": 1,
+ "sort": -10,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "automate",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-v3.1",
+ "mlperf-inference-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Crowd-benchmark all MLPerf inference benchmarks similar to SETI@home (latency, throughput, power consumption, accuracy, costs)",
+ "trophies": true,
+ "uid": "3e971d8089014d1f"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md
new file mode 100644
index 0000000000..9806c22647
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md
@@ -0,0 +1,67 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+3d-unet has two variants - `3d-unet-99` and `3d-unet-99.9` - where `99` and `99.9` specify the required accuracy constraint with respect to the reference floating-point model. Both models can be submitted under the edge as well as the datacenter category.
+
+Since 3d-unet is one of the slowest running models, we only run it using the Nvidia implementation, where the model is quantized and run on the TensorRT backend on an Nvidia GPU.
+
+For `3d-unet-99.9` runs, simply replace `3d-unet-99` with `3d-unet-99.9`.
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=3d-unet-99 --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers (see the example below)
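+
+For example, a sketch of overriding the estimated Offline QPS (the value `4` is an illustrative placeholder, not a measured number):
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --offline_target_qps=4 --quiet
+```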
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md
new file mode 100644
index 0000000000..c43363c1e9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md
@@ -0,0 +1,113 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+Bert has two variants - `bert-99` and `bert-99.9` - where `99` and `99.9` specify the required accuracy constraint with respect to the reference floating-point model. The `bert-99.9` model is applicable only to datacenter systems.
+
+In the edge category, `bert-99` has Offline and SingleStream scenarios; in the datacenter category, both `bert-99` and `bert-99.9` have Offline and Server scenarios. The commands below assume an edge category system.
+
+### Onnxruntime backend (Reference implementation)
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=bert-99 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs. This requires a power analyzer as described [here](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend (Reference implementation)
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=tf --execution-mode=valid \
+--results_dir=$HOME/inference_3.1_results --quiet
+```
+
+## Pytorch backend (Reference implementation)
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=pytorch`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results --quiet
+```
+
+## TensorRT backend (Nvidia implementation)
+
+For the TensorRT backend we are using the [Nvidia implementation](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-nvidia) and not the [MLPerf inference reference implementation](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/app-mlperf-inference-reference) for the reasons below:
+* The TensorRT backend is not supported by default in the reference implementation
+* The reference implementation is mostly for fp32 models, and quantization is not supported by default
+* Nvidia has done some fantastic work in optimizing performance for the TensorRT backend
+
+To get set up, please follow the instructions [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/reproduce-mlperf-inference-nvidia/README-about.md) to download and install TensorRT and cuDNN, unless you already have them installed. This readme also details how to handle the configuration files which are automatically generated by the Nvidia implementation scripts. Once this is done, the following command will run all the modes and scenarios.
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs. This requires a power analyzer as described [here](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the default performance numbers
+* Use `--category=datacenter` to run datacenter scenarios
+
+
+The TensorRT backend has an engine generation stage, which can be time consuming. For repeated runs, the `--adr.nvidia-harness.make_cmd=run_harness` option will skip engine regeneration and reuse the previously generated engine, as shown below.
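+
+A sketch of such a repeated run (same flags as the full command above, plus the engine-reuse option):
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--adr.nvidia-harness.make_cmd=run_harness --quiet
+```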
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md
new file mode 100644
index 0000000000..470930e373
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md
@@ -0,0 +1,90 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+In the edge category, ResNet50 has Offline, SingleStream and MultiStream scenarios; in the datacenter category, it has Offline and Server scenarios. The commands below assume an edge category system.
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios --model=resnet50 \
+--device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers (see the example below)
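+
+For example, a sketch of overriding the estimated SingleStream target latency (the value `10` is an illustrative placeholder in milliseconds, not a measured number):
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios --model=resnet50 \
+--device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --singlestream_target_latency=10 --quiet
+```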
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--submission_dir=$HOME/inference_submission_tree --clean \
+--run-checker --submitter=cTuning --adr.inference-src.version=master \
+--hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tf \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+## TVM backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tvm-onnx` (only `--device=cpu` is currently supported for TVM). For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tvm-onnx \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md
new file mode 100644
index 0000000000..4420462cde
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md
@@ -0,0 +1,75 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=retinanet --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Pytorch backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=pytorch`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md
new file mode 100644
index 0000000000..a6ca069215
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md
@@ -0,0 +1,61 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=rnnt --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do a full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (compliance logs also in the case of closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate accuracy logs and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md
new file mode 100644
index 0000000000..152c612aad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md
@@ -0,0 +1,50 @@
+## Setup AWS instance for MLPerf
+
+The instructions below are for creating an AWS instance from the CLI. You can also create an instance via the web console and set up CM on it.
+
+## Prerequisites
+
+1. AWS Key, Secret and Token
+2. A `*.pem` ssh key file to be used for creating the instance (the public key from it will be copied to `$HOME/.ssh/authorized_keys` on the created instance); one way to create it is sketched below
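+
+A minimal sketch of creating such a key file with the AWS CLI (the key name `cmuser` is an assumption chosen to match the commands below):
+
+```
+aws ec2 create-key-pair --key-name cmuser --query 'KeyMaterial' --output text > $HOME/cmuser.pem
+chmod 400 $HOME/cmuser.pem
+```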
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+### Update Access Details
+
+```
+cd $HOME/CM/repos/mlcommons@ck/cm-mlops/script/run-terraform/aws/
+cp credentials.example credentials.sh
+```
+Update `credentials.sh` with your AWS Key, Secret and Token. For example:
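+
+A hypothetical sketch of the expected contents (the variable names are assumptions; `credentials.example` shows the exact ones):
+
+```
+export TF_VAR_ACCESS_KEY="AKIA..."
+export TF_VAR_SECRET_KEY="..."
+export TF_VAR_TOKEN="..."
+```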
+
+### Create an AWS Instance
+
+
+```
+cm run script --tags=run,terraform,_m7g.xlarge,_storage_size.500,_ubuntu.2204,_us-west-2 \
+--cminit --key_file=$HOME/cmuser.pem
+```
+
+The above command will output the IP address of the created instance, which will already have CM set up.
+
+The `_m7g.xlarge,_storage_size.500,_ubuntu.2204` variations can be changed to launch a different instance type. Below are the variation combinations we used for MLPerf inference 3.0 submissions, followed by an example.
+
+* `_g4dn.xlarge`
+* `_a1.2xlarge,_storage_size.130,_ubuntu.2204`
+* `_c5.4xlarge,_storage_size.130,_ubuntu.2204`
+* `_m7g.2xlarge,_storage_size.500,_ubuntu.2204`
+* `_inf1.2xlarge,_storage_size.500,_amazon-linux-2-kernel.510`
+* `_t2.medium,_storage_size.200,_rhel.9`
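+
+For example, a sketch of launching the GPU instance from the first combination (the key file path mirrors the command above):
+
+```
+cm run script --tags=run,terraform,_g4dn.xlarge \
+--cminit --key_file=$HOME/cmuser.pem
+```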
+
+### Copy the needed files from the local machine
+
+Copy the ImageNet dataset to the created instance. For example,
+
+```
+rsync -avz -e "ssh -i $HOME/cmuser.pem" $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files must be downloaded from the Nvidia website to a local machine and copied to the AWS instance in the same way as the above command.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md
new file mode 100644
index 0000000000..a3a0e457a1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md
@@ -0,0 +1,37 @@
+## Setup GCP instance for MLPerf
+
+The instructions below are for creating a Google Cloud instance from the CLI. You can also create an instance via the web console and set up CM on it.
+
+## Prerequisites
+
+Please follow the authentication instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/run-terraform/README-about.md).
+
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+
+### Create a GCP Instance
+
+
+```
+cm run script --tags=run,terraform,_gcp,_n1-highmem.4,_gcp_project.mlperf-inference-tests --cminit
+```
+
+The above command will output the IP address of the created instance, which will already have CM set up.
+
+The `_n1-highmem.4` variation can be changed to launch a different instance type. Below is the variation combination we used for MLPerf inference 3.0 submissions, followed by an example.
+
+* `_n1-standard.4`
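+
+For example, a sketch of launching that combination (the project name mirrors the command above):
+
+```
+cm run script --tags=run,terraform,_gcp,_n1-standard.4,_gcp_project.mlperf-inference-tests --cminit
+```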
+
+### Copy the needed files
+
+Copy the ImageNet dataset to the created instance. For example,
+
+```
+rsync -avz -e "ssh -i $HOME/cmuser.pem" $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files must be downloaded from the Nvidia website to a local machine and copied to the GCP instance in the same way as the above command.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md
new file mode 100644
index 0000000000..08c0a8eeb0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md
@@ -0,0 +1,54 @@
+## Setup
+
+We used an Nvidia Jetson AGX Orin developer kit with 32GB RAM and 64GB eMMC. We also connected a 500GB SSD via USB and used a Wi-Fi connection for internet connectivity.
+
+We used the out-of-the-box developer kit image, which was running Ubuntu 20.04 and JetPack 5.0.1 Developer Preview (L4T 34.1.1) with CUDA 11.4. We were also using the default 4k page size (Nvidia recommends 64k for MLPerf inference).
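+
+You can check the page size in use on the running kernel with a standard command (a quick sanity check; `4096` corresponds to the default 4k pages):
+
+```
+getconf PAGESIZE
+```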
+
+[cuDNN 8.6.0](https://developer.nvidia.com/compute/cudnn/secure/8.6.0/local_installers/11.8/cudnn-local-repo-ubuntu2004-8.6.0.163_1.0-1_arm64.deb) and [TensorRT 8.5.2.2](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.5.3/local_repos/nv-tensorrt-local-repo-ubuntu2004-8.5.3-cuda-11.8_1.0-1_arm64.deb) were downloaded as Debian packages on a host machine, copied over to the Nvidia Jetson Orin, and installed.
+
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset.
+
+### Copy the needed files from a host machine
+
+Copy the ImageNet dataset to the device. For example,
+
+```
+rsync -avz $HOME/imagenet-2012-val/ user@192.168.0.27:
+```
+
+Log in to the Orin and register the ImageNet dataset:
+```
+cm run script --tags=get,imagenet,dataset,_2012,_full --input=$HOME/imagenet-2012-val
+```
+
+Once all the required files are copied over, follow the individual benchmark instructions from the README files given [here](./). All the required dependencies should be resolved by CM.
+
+### Power Measurement Setup
+
+We measured power in the peak performance mode (MaxN) except for one SUT, where the energy efficiency mode was changed to Max15. Our aim was to showcase the out-of-the-box performance of the Nvidia Jetson AGX Orin, including the power usage.
+
+## Reproducing the Nvidia Jetson AGX Orin Submission
+
+After our submission we followed Nvidia's instructions in the inference v3.0 repository and tried to reproduce their numbers. For MaxN mode we were able to match Nvidia's numbers using the same versions of CUDA, cuDNN and TensorRT, but outside of Docker. For MaxQ mode, we could get the same performance as Nvidia, but our power usage was about 5W higher.
+
+### Performance results MaxN
+
+The table below compares our results under different settings with the Nvidia submission for MLPerf inference 3.0. We will be updating our instructions for easier reproducibility of these numbers, including CM scripts for flashing the L4T image and rebuilding the kernel for a 64k page size.
+
+
+| Workload | Results | L4T | PAGESIZE | Power Mode | FAN Dynamic Speed control | Offline Accuracy | Offline Performance | SingleStream Accuracy | SingleStream Performance | MultiStream Accuracy | MultiStream Performance |
+| --------- | --------------------------------- | ----- | -------- | ---------- | ------------------------- | ---------------- | ------------------- | --------------------- | ------------------------ | -------------------- | ----------------------- |
+| ResNet50 | Nvidia Submitted (docker) | r35.3 | 64k | MaxN | active | 75.934 | 6438.1 | 76.032 | 0.633479 | 76.032 | 2.187731 |
+| ResNet50 | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 75.934 | 4697 | 76.032 | 0.72 | 76.032 | 2.57 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 75.85 | 6172 | 76.056 | 0.644 | 76.056 | 2.074 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 75.85 | 6430 | 76.056 | 0.659 | 76.056 | 2.20 |
+| RetinaNet | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 37.372 | 92.4048 | 37.403 | 13.924457 | 37.519 | 104.680313 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 37.346 | 80.0854 (no DLA) | 37.350 | 14.19 | 37.409 | 105.344828 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 37.345 | 94.6886 | 37.340 | 14.073 | 37.488 | 103.8 |
+| BERT | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 90.552 | 544.243 | 90.344 | 5.635431 | NA | NA |
+| BERT | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 90.552 | 449.96 | 90.344 | 7.8 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 90.562 | 527 (128 batchsize) | 90.311 | 6.636 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 90.552 | 539 | 90.344 | 6.31 | NA | NA |
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
new file mode 100644
index 0000000000..b72349ad59
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Develop a reference implementation of any MLPerf inference benchmark to run on Amazon Inferentia.
+Submit preliminary (unoptimized) benchmarking results to MLPerf inference v3.1 and beyond.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json
new file mode 100644
index 0000000000..66431963a5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-amazon-inferentia-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":3,
+ "trophies":true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "benchmark",
+ "automate",
+ "inferentia",
+ "mlperf-inference",
+ "mlperf-inference-inferentia",
+ "mlperf-inference-inferentia",
+ "mlperf-inference-inferentia-v3.1",
+ "mlperf-inference-inferentia-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Develop a reference implementation of any MLPerf inference benchmark to run on Amazon Inferentia and submit to MLPerf inference v3.1+",
+ "uid": "c8f2573320424e2a"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md
new file mode 100644
index 0000000000..c08847da6a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md
@@ -0,0 +1,20 @@
+### Challenge
+
+Create any end-to-end AI application with web cam, speech recognition, chat bot, LLM
+that uses any MLPerf model and CM automation.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+Looking forward to your submissions and happy hacking!
+
+### Prizes
+
+* *All submitters will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All submitters will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json
new file mode 100644
index 0000000000..23fb64d835
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json
@@ -0,0 +1,26 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-create-end-to-end-app",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_open": "20230704",
+ "date_close_extension": true,
+ "points":3,
+ "trophies":true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "end-to-end-app",
+ "mlperf-inference",
+ "mlperf-inference-end-to-end-app",
+ "mlperf-inference-end-to-end-app",
+ "mlperf-inference-end-to-end-app-v3.1",
+ "mlperf-inference-end-to-end-app-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Generate end-to-end optimized AI apps (LLM, speech, etc) based on MLPerf inference results (with and without container)",
+ "uid": "96ca61a5aa914063"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md
new file mode 100644
index 0000000000..f0f8908d29
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Prepare, optimize and submit benchmarking results to MLPerf inference v3.1 using
+CM automation language with the DeepSparse library, any model and any platform.
+
+Check [this related challenge](https://access.cknowledge.org/playground/?action=challenges&name=3e971d8089014d1f) for more details.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json
new file mode 100644
index 0000000000..e1cc4f8880
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-deepsparse",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "experiments": [],
+ "points": 1,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "deepsparse",
+ "mlperf-inference",
+ "mlperf-inference-deepsparse",
+ "mlperf-inference-deepsparse",
+ "mlperf-inference-deepsparse-v3.1",
+ "mlperf-inference-deepsparse-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run and optimize MLPerf inference v3.1 benchmarks with Neural Magic's DeepSparse library",
+ "trophies": true,
+ "uid": "c495863b08e74abc"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
new file mode 100644
index 0000000000..94fad05b51
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
@@ -0,0 +1,32 @@
+### Challenge
+
+Develop a reference implementation of any MLPerf inference benchmark to run on the latest publicly available Google TPU.
+Submit preliminary (unoptimized) benchmarking results to MLPerf inference v3.1 and beyond.
+
+Note that you can use either a GCP TPU or a Coral TPU USB Accelerator card.
+In the latter case, you can reuse and extend our CM-MLPerf script for MobileNets!
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json
new file mode 100644
index 0000000000..3d5aecc950
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-google-tpu-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_open": "20230704",
+ "points":3,
+ "trophies":true,
+ "date_close_extension": true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "tpu",
+ "mlperf-inference",
+ "mlperf-inference-tpu",
+ "mlperf-inference-tpu",
+ "mlperf-inference-tpu-v3.1",
+ "mlperf-inference-tpu-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Develop a reference implementation of any MLPerf inference benchmark to run on the latest publicly available Google TPU (GCP or Coral USB accelerator) and submit to MLPerf inference v3.1+",
+ "uid": "5975fd0e18cd4073"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md
new file mode 100644
index 0000000000..014f83f7d9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md
@@ -0,0 +1,52 @@
+### Introduction
+
+Open-source [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+were developed by a [consortium of 50+ companies and universities (MLCommons)](https://mlcommons.org)
+to enable trustworthy and reproducible comparison of AI/ML systems
+in terms of latency, throughput, power consumption, accuracy and other metrics
+across diverse software/hardware stacks from different vendors.
+
+However, it is difficult to customize and run MLPerf benchmarks with non-reference models.
+
+That's why the MLCommons Task Force on automation and reproducibility has developed
+a [Collective Mind automation language](https://doi.org/10.5281/zenodo.8144274)
+to modularize this benchmark and make it easier to run with different models and data sets.
+
+
+### Challenge
+
+Implement a CM workflow to connect any Hugging Face model
+to MLPerf loadgen and run it with random inputs to obtain preliminary latency and throughput
+measurements without accuracy.
+
+Resources:
+* [CM script to get ML model from Hugging Face zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo)
+* [CM script to convert Hugging Face model to ONNX](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/convert-ml-model-huggingface-to-onnx)
+* [CM script to build MLPerf loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+* [CM script to run Python Loadgen with any ONNX model](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-loadgen-generic-python/README-extra.md)
+* [MLPerf BERT FP32 model is available at Hugging Face](https://huggingface.co/ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1)
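+
+For orientation, running a local ONNX model through the generic Python loadgen harness might look roughly like this (an illustrative sketch: the exact tags and options should be checked against the script READMEs above, and the model path is a placeholder):
+
+```bash
+# Hypothetical invocation of the generic loadgen script with a local ONNX model
+cm run script "python app loadgen-generic _onnxruntime" --modelpath=./model.onnx
+```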
+
+Some results that showcase the CK workflow to benchmark Hugging Face models with MLPerf inference v3.0 (BERT):
+* https://access.cknowledge.org/playground/?action=experiments&name=2f1f70d8b2594149
+* https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--open-power--language-processing--offline&result_uid=9d2594448bbb4b45
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json
new file mode 100644
index 0000000000..146505b55a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-hugging-face-models-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":3,
+ "trophies":true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "huggingface",
+ "mlperf-inference",
+ "mlperf-inference-huggingface",
+ "mlperf-inference-huggingface",
+ "mlperf-inference-huggingface-v3.1",
+ "mlperf-inference-huggingface-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Implement CM automation to run benchmark Hugging Face models using MLPerf loadgen",
+ "uid": "72b95d08a9e04698"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
new file mode 100644
index 0000000000..aec0514730
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Add CM interface to run MLPerf inference benchmarks on Intel-based platforms.
+
+You can start by reproducing any past MLPerf inference submission from Intel and its partners
+and then add CM automation.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
new file mode 100644
index 0000000000..c3d9adbe4c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
@@ -0,0 +1,26 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-intel-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20240104",
+ "date_open": "20230704",
+ "points": 2,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "intel",
+ "mlperf-inference",
+ "mlperf-inference-intel",
+ "mlperf-inference-intel",
+ "mlperf-inference-intel-v3.1",
+ "mlperf-inference-intel-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Add the CM interface to run MLPerf inference benchmarks on Intel-based platforms",
+ "trophies": true,
+ "uid": "1c1d5da6766f4afb"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
new file mode 100644
index 0000000000..6aaf4e3947
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
@@ -0,0 +1,34 @@
+### Challenge
+
+Add support to run a reference implementation of any MLPerf inference benchmark using
+[Mojo language](https://github.com/modularml/mojo)
+from [Modular.ai](https://modular.ai).
+
+Prepare, optimize and submit benchmarking results to MLPerf inference v3.1 with Mojo.
+
+Check [this related challenge](https://access.cknowledge.org/playground/?action=challenges&name=3e971d8089014d1f) for more details.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *The first implementation will receive a cash prize from the organizers.*
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json
new file mode 100644
index 0000000000..e805879dee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-modular-mojo-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "experiments": [],
+ "points": 1,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mojo",
+ "mlperf-inference",
+ "mlperf-inference-mojo",
+ "mlperf-inference-mojo",
+ "mlperf-inference-mojo-v3.1",
+ "mlperf-inference-mojo-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run reference implementations of MLperf inference benchmarks using Mojo language from Modular.ai",
+ "trophies": true,
+ "uid": "0a8a7bb5572447db"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
new file mode 100644
index 0000000000..c16a9335a6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
@@ -0,0 +1,33 @@
+### Challenge
+
+Add CM interface to run MLPerf inference benchmarks on Qualcomm AI100-based platforms.
+
+You can start by reproducing any past submission from Dell, Lenovo or HPE
+and then add CM automation.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json
new file mode 100644
index 0000000000..07c626e259
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json
@@ -0,0 +1,26 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-qualcomm-ai100-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20240104",
+ "date_open": "20230704",
+ "points":3,
+ "trophies":true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "ai100",
+ "mlperf-inference",
+ "mlperf-inference-ai100",
+ "mlperf-inference-ai100",
+ "mlperf-inference-ai100-v3.1",
+ "mlperf-inference-ai100-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Add the CM interface to run MLPerf inference benchmarks on Qualcomm AI100-based platforms",
+ "uid": "09bd5f9e05ff46b1"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md
new file mode 100644
index 0000000000..f8d9fbd71b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md
@@ -0,0 +1,41 @@
+### Challenge
+
+Prepare, optimize and submit benchmarking results to MLPerf inference v3.1 using
+CM automation language with Apache TVM, any model and any platform.
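+
+For orientation, a reference run with a TVM backend might look roughly like the sketch below (hedged: the exact tags and the backend name, assumed here to be `tvm-onnx`, should be verified against the documentation linked below):
+
+```bash
+# Illustrative sketch of a find-performance run with the TVM ONNX backend
+cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \
+     --model=resnet50 --implementation=reference --backend=tvm-onnx \
+     --device=cpu --scenario=Offline --quiet
+```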
+
+Check [this related challenge](https://access.cknowledge.org/playground/?action=challenges&name=3e971d8089014d1f) for more details.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+### Organizers
+
+* [Deelvin](https://deelvin.com)
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Status
+
+This challenge is under preparation.
+
+* https://github.com/mlcommons/ck/pull/693
+* https://github.com/mlcommons/ck/pull/700
+* https://github.com/mlcommons/ck/pull/701
+
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json
new file mode 100644
index 0000000000..839fb6b86e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-tvm-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":1,
+ "trophies":true,
+ "experiments": [],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "tvm",
+ "mlperf-inference",
+ "mlperf-inference-tvm",
+ "mlperf-inference-tvm",
+ "mlperf-inference-tvm-v3.1",
+ "mlperf-inference-tvm-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run and optimize MLPerf inference v3.1 benchmarks with Apache TVM",
+ "uid": "29c416e245884746"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
new file mode 100644
index 0000000000..0a5fe9aa2c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Add more models and hardware backends to the [universal C++ implementation of MLPerf inference benchmarks](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-cpp)
+being developed by the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md).
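+
+For orientation, a run targeting this C++ implementation might look roughly like the sketch below (hedged: the implementation name and tags should be verified against the documentation linked below):
+
+```bash
+# Illustrative sketch of a find-performance run with the universal C++ implementation
+cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \
+     --model=resnet50 --implementation=cpp --backend=onnxruntime \
+     --device=cpu --scenario=Offline --quiet
+```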
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json
new file mode 100644
index 0000000000..e4e5cae105
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points": 2,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "cpp",
+ "mlperf-inference",
+ "mlperf-inference-cpp",
+ "mlperf-inference-cpp",
+ "mlperf-inference-cpp-v3.1",
+ "mlperf-inference-cpp-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Add more models and hardware backends to the universal C++ implementation of MLPerf inference benchmarks from MLCommons",
+ "trophies": true,
+ "uid": "518420b0e6dd4fed"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md
new file mode 100644
index 0000000000..d587f62f89
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md
@@ -0,0 +1,36 @@
+### Challenge
+
+Prepare, optimize and submit any benchmarking results to MLPerf inference v3.1 using
+CM automation language on Windows.
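+
+One practical note: MLPerf working trees can exceed the default Windows path-length limit, so enabling long paths in git before pulling CM repositories may help (a general git workaround, not a CM-specific requirement):
+
+```
+git config --system core.longpaths true
+```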
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Status
+
+Open ticket: [GitHub](https://github.com/mlcommons/ck/issues/696)
+
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json
new file mode 100644
index 0000000000..1a55dcbe0f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-windows-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":2,
+ "trophies":true,
+ "experiments": [],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "windows",
+ "mlperf-inference",
+ "mlperf-inference-windows",
+ "mlperf-inference-windows",
+ "mlperf-inference-windows-v3.1",
+ "mlperf-inference-windows-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run and optimize MLPerf inference v3.1 benchmarks on Windows",
+ "uid": "53e56d714c7649c7"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md
new file mode 100644
index 0000000000..54dd4feeb0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md
@@ -0,0 +1,16 @@
+### Challenge
+
+Reproduce MLPerf inference v3.0 benchmark results for Nvidia Jetson Orin
+(performance, accuracy, power) and automate it using the
+[MLCommons CK framework](https://github.com/mlcommons/ck).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge](https://cKnowledge.org)
+
+### Status
+
+Finished. Preliminary results are available [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md).
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json
new file mode 100644
index 0000000000..aff0fdba0f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json
@@ -0,0 +1,23 @@
+{
+ "alias": "repro-mlperf-inf-v3.0-orin",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230406",
+ "date_open": "20230301",
+ "experiments": [
+ {
+ "tags": "mlperf-inference,v3.0"
+ }
+ ],
+ "_password_hash": "$2b$12$ionIRWe5Ft7jkn4y/7C6/eYoo6uBBMkGy/9SxwtKhaDRqZ1w2s3dO",
+ "tags": [
+ "reproduce",
+ "replicate",
+ "automate",
+ "orin",
+ "nvidia",
+ "mlperf-inference-v3.0-orin"
+ ],
+ "title": "Reproduce MLPerf inference v3.0 results for Nvidia Jetson Orin",
+ "uid": "6d377c1a1b224636"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md
new file mode 100644
index 0000000000..9917547c15
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md
@@ -0,0 +1,39 @@
+### Challenge
+
+Reproduce the MLPerf inference RetinaNet benchmark during Student Cluster Competition at SuperComputing'22
+using the following [CM tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [OctoML](https://octoml.ai)
+
+### Status
+
+This challenge has been successfully completed.
+
+### Results
+
+Results from 10 international student teams are available at:
+* [W&B dashboard 1 (during SCC'22)](https://wandb.ai/cmind/cm-mlperf-sc22-scc-retinanet-offline/table?workspace=user-gfursin)
+* [W&B dashboard 2 (after SCC'22)](https://wandb.ai/cmind/cm-mlperf-dse-testing/table?workspace=user-gfursin)
+
+
+### Acknowledgments
+
+We thank
+[Hai Ah Nam](https://www.nersc.gov/about/nersc-staff/advanced-technologies-group/hai-ah-nam),
+[Steve Leak](https://www.linkedin.com/in/steve-leak),
+[Vijay Janapa Reddi](https://scholar.harvard.edu/vijay-janapa-reddi/home),
+[Tom Jablin](https://scholar.google.com/citations?user=L_1FmIMAAAAJ&hl=en),
+[Ramesh N Chukka](https://www.linkedin.com/in/ramesh-chukka-74b5b21),
+[Peter Mattson](https://www.linkedin.com/in/peter-mattson-33b8863/),
+[David Kanter](https://www.linkedin.com/in/kanterd),
+[Pablo Gonzalez Mesa](https://www.linkedin.com/in/pablo-gonzalez-mesa-952ab2207),
+[Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189),
+[Thomas Schmid](https://www.linkedin.com/in/tschmid)
+and [Gaurav Verma](https://www.linkedin.com/in/grverma)
+for their suggestions and contributions.
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json
new file mode 100644
index 0000000000..68352f9c3b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json
@@ -0,0 +1,20 @@
+{
+ "alias": "repro-mlperf-inference-retinanet-scc2022",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20221201",
+ "date_open": "20221101",
+ "tags": [
+ "modularize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-scc",
+ "mlperf-inference-scc-2022"
+ ],
+ "title": "Automate MLPerf RetinaNet benchmark at the Student Cluster Competition at SuperComputing'22 using CM",
+ "uid": "e71fa8b396874e68"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md
new file mode 100644
index 0000000000..af23eb1205
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md
@@ -0,0 +1,3 @@
+The [MLCommons](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org)
+are preparing a unified interface to reproduce results from the MLPerf inference benchmark submission v4.0.
+Please feel free to join the testing phase using [GitHub issues](https://github.com/mlcommons/ck/issues)!
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml
new file mode 100644
index 0000000000..01bcfd52a7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml
@@ -0,0 +1,25 @@
+alias: repro-mlperf-inference-v4.0-2024
+uid: e6b8738383eb46d0
+
+automation_alias: challenge
+automation_uid: 3d84abd768f34e08
+
+title: Reproduce and automate MLPerf inference benchmark results v4.0 from different vendors (Intel, Nvidia, Qualcomm, Google, NeuralMagic, ...) using CM
+
+date_open: '20240201'
+
+tags:
+- modularize
+- optimize
+- reproduce
+- replicate
+- automate
+- benchmark
+- mlperf
+- mlperf-inference
+- mlperf-inference-v4.0
+- mlperf-inference-v4.0-2024
+- v4.0
+
+experiments:
+- tags: mlperf-inference,v4.0
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md
new file mode 100644
index 0000000000..1aacc2d59d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md
@@ -0,0 +1,4 @@
+The [cTuning foundation](https://cTuning.org), [cKnowledge.org](https://cKnowledge.org) and [MLCommons](https://mlcommons.org)
+are preparing an open reproducibility challenge to reproduce various results from the MLPerf inference benchmark v4.1
+using the MLCommons CM automation framework. Please stay tuned for more details!
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml
new file mode 100644
index 0000000000..840d58318d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml
@@ -0,0 +1,22 @@
+alias: repro-mlperf-inference-v4.1-2024
+uid: 2093f4d750144df4
+
+automation_alias: challenge
+automation_uid: 3d84abd768f34e08
+
+title: 'Reproduce the upcoming MLPerf inference benchmark v4.1 results'
+
+date_open: '20240901'
+
+tags:
+- modularize
+- optimize
+- reproduce
+- replicate
+- automate
+- benchmark
+- mlperf
+- mlperf-inference
+- mlperf-inference-v4.1
+- mlperf-inference-v4.1-2024
+- v4.1
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md
new file mode 100644
index 0000000000..0f59f59f0e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md
@@ -0,0 +1,36 @@
+### Challenge
+
+Reproduce and automate [TinyMLPerf benchmarks](https://github.com/mlcommons/tiny).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Status
+
+We have successfully reproduced the [TinyMLPerf v1.0 submission with microTVM on the STMicroelectronics NUCLEO-L4R5ZI board](https://github.com/mlcommons/tiny_results_v1.0/tree/main/closed/OctoML),
+automated it with the latest version of the [MLCommons CM automation language](https://github.com/mlcommons/ck/blob/master/docs/README.md),
+submitted the reproduced results to the TinyMLPerf v1.1 round,
+and added all past TinyMLPerf results to the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny)
+for further collaborative analysis and improvement.
+
+Please check our tutorial and reproducibility report:
+* [Automate TinyMLPerf benchmark](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/automate-mlperf-tiny.md) - useful for all SW/HW stacks and submission rounds.
+* [Reproduce TinyMLPerf v1.0 submission](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/reproduce-mlperf-tiny.md).
+
+TinyMLPerf v1.1 results will be published at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny)
+in mid June 2023.
+
+### Related discussions for the future
+
+* https://github.com/mlcommons/ck/pull/693
+* https://github.com/mlcommons/ck/pull/700
+* https://github.com/mlcommons/ck/pull/701
+* https://github.com/mlcommons/ck/issues/606
+
+### Results
+
+All results will be available in [this GitHub repo](https://github.com/ctuning/cm4mlperf-results)
+and can be visualized and compared using the [MLCommons Collective Knowledge Playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json
new file mode 100644
index 0000000000..4e9e248505
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json
@@ -0,0 +1,23 @@
+{
+ "alias": "reproduce-and-automate-tinymlperf-v1.1-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230519",
+ "date_open": "20230501",
+ "experiments": [],
+ "tags": [
+ "modularize",
+ "automate",
+ "reproduce",
+ "replicate",
+ "optimize",
+ "benchmark",
+ "tinymlperf",
+ "tinymlperf-inference",
+ "tinymlperf-inference-v3.0",
+ "tinymlperf-inference-v3.0-2023",
+ "v1.0"
+ ],
+ "title": "Reproduce and optimize TinyMLPerf inference v1.1 benchmarks",
+ "uid": "d98cd66e0e5641f7"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md
new file mode 100644
index 0000000000..a1f1ea22ac
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md
@@ -0,0 +1,17 @@
+### Challenge
+
+Prepare, optimize and reproduce MLPerf training v3.0 benchmarks
+using the [MLCommons CM (CK2) automation framework](https://github.com/mlcommons/ck)
+
+### Status
+
+We could not make a successful submission, mainly because the training scripts did not converge on a single GPU. We tried ResNet and BERT training. The following CM scripts were added to run MLPerf BERT training using the reference and Nvidia implementations:
+
+1. [BERT Training using Nvidia code](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/app-mlperf-training-nvidia)
+2. [BERT Training using MLPerf Reference code](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/app-mlperf-training-reference)
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge](https://cKnowledge.org)
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json
new file mode 100644
index 0000000000..d1e5eddea8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json
@@ -0,0 +1,23 @@
+{
+ "alias": "reproduce-mlperf-training-v3.0-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230519",
+ "date_open": "20230501",
+ "experiments": [],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mlperf",
+ "mlperf-training",
+ "mlperf-training-v3.0",
+ "mlperf-training-v3.0-2023",
+ "v3.0"
+ ],
+ "title": "Reproduce MLPerf training v3.0 benchmarks",
+ "uid": "1d26149c1cce4da3"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md
new file mode 100644
index 0000000000..bd734f7896
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md
@@ -0,0 +1,67 @@
+### Introduction
+
+The goal of this MLPerf@home challenge is to help the community find
+the most efficient CPU (Intel/AMD/Arm) for the BERT-99 model with the DeepSparse engine
+and different variations of MobileNets/EfficientNets with TFLite
+in terms of latency, throughput, accuracy, number of cores, frequency, memory size, cost, and other metrics.
+
+We would like to ask you to run a few [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+with BERT and MobileNets/EfficientNets on one or more systems with different CPUs
+that you have access to: laptops, servers, cloud instances...
+
+You will be able to run benchmarks, collect all metrics and submit results in an automated way
+in a native environment or Docker container using the portable and technology-agnostic
+[MLCommons Collective Mind automation language (CM)](https://doi.org/10.5281/zenodo.8105339).
+
+Your name and benchmark submissions will be published in the official MLCommons inference v3.1 results
+on September 1, 2023 (submission deadline: August 4, 2023),
+will appear in the [official leaderboard](https://access.cknowledge.org/playground/?action=contributors),
+will be included in the prize draw, and will be presented in our upcoming ACM/HiPEAC events.
+
+Please report encountered problems using [GitHub issues](https://github.com/mlcommons/ck)
+to help the community improve CM automation workflows to run MLPerf benchmarks on any system with any software/hardware stack.
+
+Thank you in advance for helping the community find Pareto-efficient AI/ML Systems!
+
+### Minimal requirements
+
+* CPU: Any x86-64 or Arm64
+* OS:
+ * native: any Linux (tested on Ubuntu 22.04)
+ * Docker: any OS
+* Disk space:
+ * BERT-99: ~ 20GB
+ * Different variations of MobileNets/EfficientNets: ~ 140GB
+* Time to run:
+ * BERT-99: ~ 2 hours
+ * Different variations of MobileNets/EfficientNets: ~ 2 days
+
+### Instructions to run benchmarks and submit results
+
+You can run any of these benchmarks or all depending on available time:
+
+* [Automated Design Space Exploration of MobileNets/EfficientNets; TFLite MLPerf implementation; native environment or Docker](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md)
+* [BERT-99 model; DeepSparse MLPerf implementation; native environment](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-cpu/run-cpu-bert-99-deepsparse.md)
+
+### Results
+
+All accepted results with submitter names will be publicly available
+at the official [MLCommons website](https://mlcommons.org)
+and in the [Collective Knowledge explorer (MLCommons CK)](https://access.cknowledge.org/playground/?action=experiments)
+along with the reproducibility and automation report to help the community
+build efficient AI/ML systems.
+
+
+### Organizers
+
+* [MLCommons Task Force on Automation and Reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Advanced challenges
+
+If you feel that running these benchmarks was relatively easy,
+please try [more advanced challenges](https://access.cknowledge.org/playground/?action=challenges),
+read about our [plans and long-term vision](https://doi.org/10.5281/zenodo.8105339),
+check [CM documentation](https://github.com/mlcommons/ck/blob/master/docs/README.md)
+and run other [MLPerf benchmarks](https://github.com/mlcommons/ck/tree/master/docs/mlperf).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json
new file mode 100644
index 0000000000..88f4716cda
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json
@@ -0,0 +1,21 @@
+{
+ "alias": "run-mlperf@home-v3.1-cpu",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_open": "20230725",
+ "experiments": [],
+ "points": 2,
+ "sort": -20,
+ "tags": [
+ "run",
+ "mlperf",
+ "inference",
+ "v3.1",
+ "mlperf-inference-v3.1-simple-cpu"
+ ],
+ "title": "Work with the community to find the most efficient CPUs (Intel/AMD/Arm) for BERT and MobileNets/EfficientNets (latency, throughput, accuracy, number of cores, frequency, memory size, cost and other metrics)",
+ "skip": true,
+ "trophies": true,
+ "uid": "498f33f3dac647c1"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md
new file mode 100644
index 0000000000..b4266ffa97
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md
@@ -0,0 +1,100 @@
+# Introduction
+
+This guide will help you automatically run the MLPerf inference benchmark v3.1 with BERT-99 model and DeepSparse engine
+on any Linux-based system with Intel, AMD or Arm CPU.
+
+This benchmark is automated by the MLCommons CM language, and you should be able to submit official MLPerf v3.1 inference results
+for the offline scenario in the open division and edge category.
+
+It will require ~20GB of disk space and can take ~2 hours to run on one system.
+
+
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
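+
+A minimal installation sketch, assuming a recent Python 3 with pip (see the linked guide for OS-specific notes):
+
+```bash
+python3 -m pip install cmind
+```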
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to a large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS} >> $HOME/.bashrc"
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup virtual environment
+
+We suggest you set up a Python virtual environment via CM to avoid contaminating your existing Python installation:
+
+```bash
+cm run script "install python-venv" --name=mlperf --version_min=3.8
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+CM will install a new Python virtual environment in CM cache and will install all Python dependencies there:
+```bash
+cm show cache --tags=python-venv
+```
+
+Note that CM downloads and/or installs models, data sets, packages, libraries and tools in this cache.
+
+You can clean it at any time and start from scratch using the following command:
+```bash
+cm rm cache -f
+```
+
+Alternatively, you can remove specific entries using tags:
+```bash
+cm show cache
+cm rm cache --tags=tag1,tag2,...
+```
+
+
+
+
+### Do a test run to detect and record the system performance
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=bert-99 --implementation=reference --device=cpu --backend=deepsparse \
+--category=edge --division=open --quiet --scenario=Offline
+```
+
+### Do full accuracy and performance run
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_submission --model=bert-99 \
+--device=cpu --implementation=reference --backend=deepsparse \
+--execution-mode=valid --results_dir=$HOME/results_dir \
+--category=edge --division=open --quiet --scenario=Offline
+```
+### Generate and upload MLPerf submission
+
+Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/Submission.md) to generate the submission tree and upload your results.
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md
new file mode 100644
index 0000000000..f41b1b463b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md
@@ -0,0 +1,77 @@
+# Introduction
+
+This guide will help you automatically run the MLPerf inference benchmark v3.1 with multiple variations of MobileNets and EfficientNets
+and TFLite on any Linux-based system with Intel, AMD or Arm CPU.
+
+This benchmark is automated by the MLCommons CM language, and you should be able to submit official MLPerf v3.1 inference results
+for the single-stream scenario in the open division and edge category.
+
+It will require ~140GB of disk space and can take ~2 days to run on one system, producing 243 MLPerf results
+during automatic design space exploration to trade off accuracy vs. performance.
+
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to a large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS} >> $HOME/.bashrc"
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup virtual environment
+
+We suggest you set up a Python virtual environment via CM to avoid contaminating your existing Python installation:
+
+```bash
+cm run script "install python-venv" --name=mlperf --version_min=3.8
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+CM will install a new Python virtual environment in CM cache and will install all Python dependencies there:
+```bash
+cm show cache --tags=python-venv
+```
+
+Note that CM downloads and/or installs models, data sets, packages, libraries and tools in this cache.
+
+You can clean it at any time and start from scratch using the following command:
+```bash
+cm rm cache -f
+```
+
+Alternatively, you can remove specific entries using tags:
+```bash
+cm show cache
+cm rm cache --tags=tag1,tag2,...
+```
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md
new file mode 100644
index 0000000000..b6482d3835
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md
@@ -0,0 +1,65 @@
+### Introduction
+
+The goal of this MLPerf@home challenge is to help the community find
+the most efficient Nvidia GPUs for the GPT-J 6B and BERT-99 models in terms of
+latency, throughput, accuracy, number of cores, frequency, memory size, cost, and other metrics.
+
+We would like to ask you to run a few [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+with GPT-J and BERT-99 models on one or more systems with different Nvidia GPUs
+that you have access to: laptops, servers, cloud instances...
+
+You will be able to run benchmarks, collect all metrics and submit results in an automated way
+in a native environment or Docker container using the portable and technology-agnostic
+[MLCommons Collective Mind automation language (CM)](https://doi.org/10.5281/zenodo.8105339).
+
+Your name and benchmark submissions will be published in the official MLCommons inference v3.1 results
+on September 1, 2023 (**submission deadline: August 17, 2023**),
+will appear in the [official leaderboard](https://access.cknowledge.org/playground/?action=contributors),
+will be included in the prize draw, and will be presented in our upcoming ACM/HiPEAC events.
+
+Please report encountered problems using [GitHub issues](https://github.com/mlcommons/ck)
+to help the community improve CM automation workflows to run MLPerf benchmarks on any system with any software/hardware stack.
+
+Thank you in advance for helping the community find Pareto-efficient AI/ML Systems!
+
+### Minimal requirements
+
+* GPU: Nvidia
+* GPU memory:
+ * GPT-J 6B: min 24GB
+ * BERT-99: min 8-16GB
+* OS:
+ * native: any Linux (tested on Ubuntu 22.04)
+ * Docker: any OS
+* Disk space: ~30GB per model/data set
+* Time to run:
+ * GPT-J 6B: ~ 1 day
+ * BERT-99: ~ 2 hours
+
+### Instructions to run benchmarks and submit results
+
+* [GPT-J 6B model (24GB min GPU memory); PyTorch+CUDA; native environment](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md)
+* [BERT-99 model (8GB min GPU memory); TensorRT; Docker](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md)
+
+### Results
+
+All accepted results with submitter names will be publicly available
+at the official [MLCommons website](https://mlcommons.org)
+and in the [Collective Knowledge explorer (MLCommons CK)](https://access.cknowledge.org/playground/?action=experiments)
+along with the reproducibility and automation report to help the community
+build efficient AI/ML systems.
+
+### Organizers
+
+* [MLCommons Task Force on Automation and Reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Advanced challenges
+
+If you feel that running these benchmarks was relatively easy,
+please try [more advanced challenges](https://access.cknowledge.org/playground/?action=challenges),
+read about our [plans and long-term vision](https://doi.org/10.5281/zenodo.8105339),
+check [CM documentation](https://github.com/mlcommons/ck/blob/master/docs/README.md)
+and run other [MLPerf benchmarks](https://github.com/mlcommons/ck/tree/master/docs/mlperf).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json
new file mode 100644
index 0000000000..af7deeadae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json
@@ -0,0 +1,20 @@
+{
+ "alias": "run-mlperf@home-v3.1-gpu",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_open": "20230725",
+ "experiments": [],
+ "points": 2,
+ "sort": -30,
+ "tags": [
+ "run",
+ "mlperf",
+ "inference",
+ "v3.1",
+ "mlperf-inference-v3.1-simple-cpu"
+ ],
+ "title": "Work with the community to find the most efficient Nvidia GPUs for GPT-J 6B model and BERT (latency, throughput, accuracy, number of cores, frequency, memory size, cost, and other metrics)",
+ "trophies": true,
+ "uid": "54230c3b66564cef"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md
new file mode 100644
index 0000000000..f543c23621
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md
@@ -0,0 +1,193 @@
+# Introduction
+
+This guide will help you run the Nvidia implementation of the MLPerf inference benchmark v3.1
+with the BERT-99 model and TensorRT on any Linux-based system with an Nvidia GPU (minimum 8-16GB of memory required)
+and Docker.
+
+This benchmark is semi-automated by the [MLCommons CM language](https://doi.org/10.5281/zenodo.8105339)
+and you should be able to submit official MLPerf v3.1 inference results
+for all scenarios in the closed division and edge category
+(**deadline to send us results for v3.1 submission: August 3, 2023**).
+
+
+It will require ~30GB of disk space and can take ~2 hours to run on 1 system.
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to a large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS} >> $HOME/.bashrc"
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup CUDA and Docker container
+
+### Download CUDA 11.8
+
+Nvidia recommends the following version of CUDA to be used with their MLPerf inference implementation:
+
+```
+wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+```
+
+However, you are very welcome to try another version!
+
+### Download cuDNN, TensorRT
+
+For x86 machines, please download the following TAR files:
+1. [cuDNN](https://developer.nvidia.com/cudnn) - note that Nvidia recommends `cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz`
+ but you are welcome to try another version
+2. [TensorRT](https://developer.nvidia.com/tensorrt) - note that Nvidia recommends `TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz`
+ but you can try another version
+
+
+### Set up Nvidia Docker container with MLPerf benchmarks
+
+1. [Install Docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+2. Give Docker permission to the current user
+ ```
+ sudo usermod -aG docker $USER
+ ```
+ Log out and log back in.
+ Restart Docker if required and confirm that the Nvidia container toolkit is working:
+ ```
+ nvidia-ctk --version
+ ```
+
+3. Check if Nvidia driver is working properly on the host.
+ ```
+ nvidia-smi
+ ```
+ If the above command produces an error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access:
+ ```
+ cmr "install cuda prebuilt _driver" --version=11.8.0
+ ```
+
+
+4. Build the docker container and mount the paths from the host machine.
+
+ *You may need to change --cuda_run_file_path, --tensorrt_tar_file_path and --cudnn_tar_file_path if you downloaded versions other than those recommended by Nvidia.*
+
+ *You may want to change the `scratch_path` location as it can take 100s of GBs.*
+
+ ```bash
+ cm docker script --tags=build,nvidia,inference,server \
+ --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
+ --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+ --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+ --scratch_path=$HOME/mlperf_scratch \
+ --docker_cm_repo=mlcommons@ck \
+ --results_dir=$HOME/results_dir \
+ --submission_dir=$HOME/submission_dir \
+ --adr.compiler.tags=gcc
+ ```
+
+5. At the end of the build you'll get a prompt: enter your system name, such as "aws_nvidia_t4"
+   (note that spaces, `-` and other special characters are not allowed),
+   and answer `yes` to generate the configuration files.
+
+ ```
+ ============================================
+ => A system ID is a string containing only letters, numbers, and underscores
+ => that is used as the human-readable name of the system. It is also used as
+ => the system name when creating the measurements/ and results/ entries.
+ => This string should also start with a letter to be a valid Python enum member name.
+ => Specify the system ID to use for the current system: phoenix
+ => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+ => This script will generate Benchmark Configuration stubs for the detected system.
+ Continue? [y/n]: y
+ ```
+ Now you'll be inside the CM Nvidia docker container and can access Nvidia implementations of MLPerf inference benchmarks.
+
+6. Once the build is complete, you can run Nvidia implementations of MLPerf inference benchmarks
+ using the unified CM interface.
+
+ You can also save the container at this stage using [Docker commit](https://docs.docker.com/engine/reference/commandline/commit/)
+ so that it can be launched later without having to go through the previous steps.
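+
+   For example, from the host (the container ID and image tag below are illustrative placeholders):
+
+   ```bash
+   docker ps                                   # find the ID of the running CM container
+   docker commit <container_id> cm-mlperf-nvidia:saved
+   docker run -it --gpus all cm-mlperf-nvidia:saved bash
+   ```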
+
+
+### Do a test run to detect and record the system performance
+
+```bash
+cmr "generate-run-cmds inference _find-performance _all-scenarios" \
+ --model=bert-99 \
+ --implementation=nvidia-original \
+ --device=cuda \
+ --backend=tensorrt \
+ --category=edge \
+ --division=closed \
+ --test_query_count=1000 \
+ --quiet
+```
+
+### Do full accuracy and performance runs
+
+```bash
+cmr "generate-run-cmds inference _submission _all-scenarios" \
+ --model=bert-99 \
+ --device=cuda \
+ --implementation=nvidia-original \
+ --backend=tensorrt \
+ --execution-mode=valid \
+ --results_dir=$HOME/results_dir \
+ --category=edge \
+ --division=closed \
+ --quiet
+```
+
+* `--offline_target_qps` and `--singlestream_target_latency` can be used to override the measured performance numbers; a sketch with placeholder values (not recommendations) is shown below:
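+
+```bash
+cmr "generate-run-cmds inference _submission _all-scenarios" \
+    --model=bert-99 \
+    --device=cuda \
+    --implementation=nvidia-original \
+    --backend=tensorrt \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --category=edge \
+    --division=closed \
+    --offline_target_qps=1000 \
+    --singlestream_target_latency=10 \
+    --quiet
+```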
+
+### Populate the README files describing your submission
+
+```bash
+cmr "generate-run-cmds inference _populate-readme _all-scenarios" \
+ --model=bert-99 \
+ --device=cuda \
+ --implementation=nvidia-original \
+ --backend=tensorrt \
+ --execution-mode=valid \
+ --results_dir=$HOME/results_dir \
+ --category=edge \
+ --division=closed \
+ --quiet
+```
+
+### Generate and upload MLPerf submission
+
+Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/Submission.md) to generate the submission tree and upload your results.
+
+
+## Questions? Suggestions?
+
+Please follow the [cTuning foundation](https://cTuning.org), [cKnowledge.org](https://cKnowledge.org)
+and [MLCommons](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md
new file mode 100644
index 0000000000..39b1cc0de2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md
@@ -0,0 +1,314 @@
+# Introduction
+
+This guide will help you run the reference implementation of the MLPerf inference benchmark v3.1
+with GPT-J 6B model and PyTorch on any Linux-based system with Nvidia GPU (24GB min memory required)
+using the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339).
+
+CM will help you to obtain performance and accuracy numbers for GPT-J 6B model on your system
+for the SingleStream scenario and submit them to the official MLPerf v3.1 inference benchmarking round
+in open division and edge category
+(**deadline to send us results for v3.1 submission: August 3, 2023**).
+
+You can read more about scenarios, divisions and categories of MLPerf inference benchmarks
+in this [MLPerf inference benchmark paper](https://arxiv.org/abs/1911.02549) -
+our goal is to help the community compare performance, accuracy and other metrics of popular models across diverse systems
+in an automated, unified and reproducible way!
+
+This benchmark will require ~30GB of disk space and can take ~1 day to run on one system
+to produce a valid MLPerf result.
+
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://github.com/mlcommons/ck) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under the Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models, and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to a large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "export CM_REPOS=${CM_REPOS}" >> $HOME/.bashrc
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup virtual environment
+
+We suggest you set up a Python virtual environment via CM to avoid contaminating your existing Python installation:
+
+```bash
+cm run script "install python-venv" --name=mlperf --version_min=3.8
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+CM will install a new Python virtual environment in the CM cache and will install all Python dependencies there:
+```bash
+cm show cache --tags=python-venv
+```
+
+Note that CM downloads and/or installs models, data sets, packages, libraries and tools in this cache.
+
+You can clean it at any time and start from scratch using the following command:
+```bash
+cm rm cache -f
+```
+
+Alternatively, you can remove specific entries using tags:
+```bash
+cm show cache
+cm rm cache --tags=tag1,tag2,...
+```
+
+
+## Do the performance run
+
+Now you can run the MLPerf inference benchmark to measure the performance of GPT-J using the CM command below
+(note that `cmr` is equivalent to `cm run script`):
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_performance-only \
+ --division=open \
+ --category=edge \
+ --model=gptj-99 \
+ --precision=bfloat16 \
+ --device=cuda \
+ --implementation=reference \
+ --backend=pytorch \
+ --scenario=SingleStream \
+ --env.GPTJ_BEAM_SIZE=1 \
+ --execution-mode=valid \
+ --results_dir=$HOME/results_dir \
+ --quiet
+```
+
+Note that this command will automatically download the model (24GB)
+and the [CNN Daily Mail dataset (relatively small)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm)!
+
+The benchmark run is expected to finish within 10-100 minutes depending on the performance of your GPU.
+
+At the end of the valid run, you should see [output](https://github.com/ctuning/mlperf_inference_submissions_v3.1/blob/main/open/cTuning/results/amd_zen4_workstation-reference-gpu-pytorch-v2.0.1-default_config/gptj-99/singlestream/performance/run_1/mlperf_log_summary.txt) similar to:
+
+```txt
+================================================
+MLPerf Results Summary
+================================================
+SUT name : PySUT
+Scenario : SingleStream
+Mode : PerformanceOnly
+90th percentile latency (ns) : 4751920830
+Result is : VALID
+ Min duration satisfied : Yes
+ Min queries satisfied : Yes
+ Early stopping satisfied: Yes
+Early Stopping Result:
+ * Processed at least 64 queries (201).
+ * Would discard 9 highest latency queries.
+ * Early stopping 90th percentile estimate: 5387449249
+ * Not enough queries processed for 99th percentile
+ early stopping estimate (would need to process at
+ least 662 total queries).
+
+================================================
+Additional Stats
+================================================
+QPS w/ loadgen overhead : 0.33
+QPS w/o loadgen overhead : 0.33
+
+Min latency (ns) : 881803157
+Max latency (ns) : 5939081711
+Mean latency (ns) : 3008773902
+50.00 percentile latency (ns) : 2788885477
+90.00 percentile latency (ns) : 4751920830
+95.00 percentile latency (ns) : 5307244203
+97.00 percentile latency (ns) : 5677375096
+99.00 percentile latency (ns) : 5927209480
+99.90 percentile latency (ns) : 5939081711
+
+================================================
+Test Parameters Used
+================================================
+samples_per_query : 1
+target_qps : 2000
+target_latency (ns): 0
+max_async_queries : 1
+min_duration (ms): 600000
+max_duration (ms): 620000
+min_query_count : 100
+max_query_count : 0
+qsl_rng_seed : 148687905518835231
+sample_index_rng_seed : 520418551913322573
+schedule_rng_seed : 811580660758947900
+accuracy_log_rng_seed : 0
+accuracy_log_probability : 0
+accuracy_log_sampling_target : 0
+print_timestamps : 0
+performance_issue_unique : 0
+performance_issue_same : 0
+performance_issue_same_index : 0
+performance_sample_count : 13368
+
+No warnings encountered during test.
+
+No errors encountered during test.
+```
+
+
+## Do the accuracy run
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_accuracy-only \
+ --division=open \
+ --category=edge \
+ --model=gptj-99 \
+ --precision=bfloat16 \
+ --device=cuda \
+ --implementation=reference \
+ --backend=pytorch \
+ --scenario=SingleStream \
+ --env.GPTJ_BEAM_SIZE=1 \
+ --execution-mode=valid \
+ --results_dir=$HOME/results_dir \
+ --quiet
+```
+
+This accuracy run can take many hours (typically 12 to 46 hours). You can estimate its duration using the QPS (queries per second)
+reported by the previous performance run as follows:
+
+accuracy time = dataset size / QPS = 13368 / QPS seconds.
+
+For example, if your reported QPS is 0.1 (equivalent to a 10000 ms latency), it will take 13368 / 0.1 = 133680 seconds ≈ 37 hours.
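+
+As a quick sanity check, you can compute the estimate from your own measured numbers (a sketch; plug in the "QPS w/o loadgen overhead" value from your `mlperf_log_summary.txt`):
+
+```bash
+QPS=0.33   # example value taken from the sample performance run above
+python3 -c "print(f'estimated accuracy run: {13368 / $QPS / 3600:.1f} hours')"
+```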
+
+
+
+## Populate the MLPerf README files describing your submission
+
+Now you can use CM to automatically populate README files mandated by MLPerf to describe your submission
+(we also show you a simpler syntax of `cmr` instead of `cm run script --tags=`):
+
+```bash
+cmr "generate-run-cmds inference _populate-readme" \
+ --division=open \
+ --category=edge \
+ --model=gptj-99 \
+ --precision=bfloat16 \
+ --device=cuda \
+ --implementation=reference \
+ --backend=pytorch \
+ --scenario=SingleStream \
+ --env.GPTJ_BEAM_SIZE=1 \
+ --execution-mode=valid \
+ --results_dir=$HOME/results_dir \
+ --quiet
+```
+
+
+## Generate MLPerf submission
+
+If your organization is not an official member of MLCommons, you can still participate in the official MLPerf inference community submission
+via the cTuning foundation (a founding member of MLCommons).
+
+You should update the following flags in the CM command below:
+* Use the `--hw_notes_extra` option to add your name to the submission, e.g. `--hw_notes_extra="Result taken by NAME"`.
+* Use `--hw_name="My system name"` to give a meaningful system name describing your GPU.
+  Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
+* Use `--submitter={your organization}` if your organization is an official MLCommons member and you would like to submit under your organization's name.
+
+You should use the master branch of MLCommons inference repo for the submission checker:
+
+```bash
+cmr "generate inference submission" \
+ --clean \
+ --submitter=cTuning \
+ --results_dir=$HOME/results_dir/valid_results \
+ --submission_dir=$HOME/inference_submission_tree \
+ --preprocess_submission=yes \
+ --adr.compiler.tags=gcc \
+ --adr.inference-src.version=master \
+ --run-checker
+```
+
+## Push the results to GitHub repo
+
+1. Create a fork of [this cTuning repo with the community results](https://github.com/ctuning/mlperf_inference_submissions_v3.1).
+
+2. Run the following command after replacing `--repo_url` with your fork URL.
+
+   ```bash
+   cmr "push github mlperf inference submission" \
+       --submission_dir=$HOME/inference_submission_tree \
+       --repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+       --commit_message="GPTJ results on {SYSTEM} added by {NAME}"
+   ```
+
+3. Create a PR to the [cTuning repo with the community results](https://github.com/ctuning/mlperf_inference_submissions_v3.1).
+
+
+
+## Additional performance optimization challenge for interested enthusiasts
+
+The MLPerf GPT-J inference benchmark is implemented in this [backend.py](https://github.com/mlcommons/inference/blob/master/language/gpt-j/backend.py).
+
+It is automatically installed and cached by CM. You can find it on your system using this command:
+```bash
+cd `cm find cache --tags=inference,src,_branch.master`/language/gpt-j
+ls backend.py
+```
+
+The original model is available at the [Hugging Face Zoo](https://huggingface.co/EleutherAI/gpt-j-6b). It was fine-tuned by Intel for this benchmark
+and is available at the MLCommons cloud. It is automatically downloaded by CM using [this script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-gptj/_cm.json).
+
+You can try to improve the performance (QPS) of this code, or fine-tune the model and substitute the default one
+in [this line](https://github.com/mlcommons/inference/blob/master/language/gpt-j/backend.py#L27).
+
+Some examples of fine-tuning can be seen [here](https://betterprogramming.pub/fine-tuning-gpt-j-6b-on-google-colab-or-equivalent-desktop-or-server-gpu-b6dc849cb205).
+
+Any better performance or accuracy result will be very valuable to the community.
+
+After any modification, you can redo a quick performance run to see the performance difference:
+```bash
+cm run script --tags=generate-run-cmds,inference,_performance-only \
+ --division=open \
+ --category=edge \
+ --model=gptj-99 \
+ --precision=bfloat16 \
+ --device=cuda \
+ --implementation=reference \
+ --backend=pytorch \
+ --scenario=SingleStream \
+ --env.GPTJ_BEAM_SIZE=1 \
+ --execution-mode=valid \
+ --results_dir=$HOME/results_dir \
+ --quiet
+```
+
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md
new file mode 100644
index 0000000000..4e9f6cf178
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md
@@ -0,0 +1,20 @@
+### Challenge
+
+Improve the prototype of our LLM-based assistant that suggests to users how to run MLPerf inference benchmarks
+using the MLCommons CM automation language: https://access.cknowledge.org/assistant .
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *Get in touch with organizers for more info!*
+
+
+### Organizers
+
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json
new file mode 100644
index 0000000000..ce6009db37
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json
@@ -0,0 +1,21 @@
+{
+ "alias": "train-llm-for-cm-mlperf-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "experiments": [],
+ "points": 3,
+ "tags": [
+ "train",
+ "improve",
+ "llm",
+ "assistant",
+ "mlperf-llm",
+ "mlperf-llm-assistant",
+ "mlperf-assistant"
+ ],
+ "title": "Train and improve LLM to suggest users how to run MLPerf inference benchmarks using CM automation language",
+ "trophies": true,
+ "uid": "d37bf37a24c44ec3"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md b/cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md
new file mode 100644
index 0000000000..2b0b1242b0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md
@@ -0,0 +1,46 @@
+This README provides a walkthrough of the keys in a CM script's `_cm.yaml` file; a minimal example is shown at the end.
+
+## Keys and datatypes
+
+1. **alias**: `string`
+2. **uid**: `string`
+3. **automation_alias**: `string`
+4. **automation_uid**: `string`
+5. **category**: `string`
+6. **developers**: `list of strings`
+7. **tags**: `list of strings`
+8. **default_env**: `dictionary` - Contains key-value pairs where values are `strings`
+9. **env**: `dictionary` - Contains key-value pairs where values are `strings`
+10. **input_mapping**: `dictionary` - Contains key-value pairs where values are `strings`
+11. **env_key_mapping**: `dictionary` - Contains key-value pairs where values are `strings`
+12. **new_env_keys**: `list of strings`
+13. **new_state_keys**: `list of strings`
+14. **deps**: `list of dictionaries` - Each dictionary can contain `tags` or other nested keys
+15. **names**: `list of strings`
+16. **enable_if_env**: `dictionary` - Contains key-value pairs where values are lists of `strings`
+17. **skip_if_env**: `dictionary` - Contains key-value pairs where values are lists of `strings`
+18. **prehook_deps**: `list of dictionaries` - Each dictionary may contain `names` and `tags` as lists
+19. **posthook_deps**: `list of dictionaries` - Each dictionary may contain `tags` and other keys
+20. **variation_groups_order**: `list of strings`
+21. **variations**: `dictionary` - Each variation is a dictionary containing keys like `alias`, `default_variations`, `group`, etc.
+22. **group**: `string`
+23. **add_deps_recursive**: `dictionary` - Contains nested `tags` and other keys
+24. **default_variations**: `dictionary` - Contains key-value pairs where values are `strings`
+25. **docker**: `dictionary` - Contains keys specific to Docker configurations:
+ - **base_image**: `string`
+ - **image_name**: `string`
+ - **os**: `string`
+ - **os_version**: `string`
+ - **deps**: `list of dictionaries` - Each dictionary can include `tags` or other keys.
+ - **env**: `dictionary` - Contains key-value pairs where values are `strings`
+ - **interactive**: `boolean`
+ - **extra_run_args**: `string`
+ - **mounts**: `list of strings` - Specifies mount paths in the format `"source:destination"`
+ - **pre_run_cmds**: `list of strings` - Commands to run before the container starts
+ - **docker_input_mapping**: `dictionary` - Contains key-value pairs where values are strings, mapping input parameters to Docker environment variables
+ - **use_host_user_id**: `boolean`
+ - **use_host_group_id**: `boolean`
+ - **skip_run_cmd**: `string`
+ - **shm_size**: `string`
+ - **real_run**: `boolean`
+ - **all_gpus**: `string`
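+
+## Minimal example
+
+The sketch below pulls several of the keys above together into a hypothetical `_cm.yaml` (all values, including the alias, UIDs and tags, are placeholders rather than a real script):
+
+```bash
+cat > _cm.yaml << 'EOF'
+alias: my-test-script
+uid: 0123456789abcdef
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Tests
+tags:
+- my
+- test-script
+default_env:
+  CM_MY_TEST_GREETING: hello
+input_mapping:
+  greeting: CM_MY_TEST_GREETING
+new_env_keys:
+- CM_MY_TEST_*
+deps:
+- tags: detect,os
+variations:
+  loud:
+    env:
+      CM_MY_TEST_LOUD: 'yes'
+EOF
+```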
diff --git a/cmx4mlops/cmx4mlops/repo/docs/getting-started.md b/cmx4mlops/cmx4mlops/repo/docs/getting-started.md
new file mode 100644
index 0000000000..baed31eea3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/getting-started.md
@@ -0,0 +1,135 @@
+
+# Getting Started with CM Script Automation
+
+## Running CM Scripts
+
+To execute a simple script in CM that captures OS details, use the following command:
+
+```bash
+cm run script --tags=detect,os -j
+```
+
+This command gathers details about the system on which it's run, such as:
+
+```json
+{
+ "CM_HOST_OS_TYPE": "linux",
+ "CM_HOST_OS_BITS": "64",
+ "CM_HOST_OS_FLAVOR": "ubuntu",
+ "CM_HOST_OS_FLAVOR_LIKE": "debian",
+ "CM_HOST_OS_VERSION": "24.04",
+ "CM_HOST_OS_KERNEL_VERSION": "6.8.0-45-generic",
+ "CM_HOST_OS_GLIBC_VERSION": "2.39",
+ "CM_HOST_OS_MACHINE": "x86_64",
+ "CM_HOST_OS_PACKAGE_MANAGER": "apt",
+ "CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD": "DEBIAN_FRONTEND=noninteractive apt-get install -y",
+ "CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD": "apt-get update -y",
+ "+CM_HOST_OS_DEFAULT_LIBRARY_PATH": [
+ "/usr/local/lib/x86_64-linux-gnu",
+ "/lib/x86_64-linux-gnu",
+ "/usr/lib/x86_64-linux-gnu",
+ "/usr/lib/x86_64-linux-gnu64",
+ "/usr/local/lib64",
+ "/lib64",
+ "/usr/lib64",
+ "/usr/local/lib",
+ "/lib",
+ "/usr/lib",
+ "/usr/x86_64-linux-gnu/lib64",
+ "/usr/x86_64-linux-gnu/lib"
+ ],
+ "CM_HOST_PLATFORM_FLAVOR": "x86_64",
+ "CM_HOST_PYTHON_BITS": "64",
+ "CM_HOST_SYSTEM_NAME": "intel-spr-i9"
+}
+```
+
+For more details on CM scripts, see the [CM documentation](index.md).
+
+### Adding New CM Scripts
+
+CM aims to provide lightweight connectors between existing automation scripts and tools without substituting them. You can add your own scripts to CM with the following command, which creates a script named `hello-world`:
+
+```bash
+cm add script hello-world --tags=hello-world,display,test
+```
+
+This command initializes a CM script in the local repository with the following structure:
+
+```
+└── CM
+ ├── index.json
+ ├── repos
+ │ ├── local
+ │ │ ├── cfg
+ │ │ ├── cache
+ │ │ ├── cmr.yaml
+ │ │ └── script
+ │ │ └── hello-world
+ │ │ ├── _cm.yaml
+ │ │ ├── customize.py
+ │ │ ├── README-extra.md
+ │ │ ├── run.bat
+ │ │ └── run.sh
+ │ └── mlcommons@cm4mlops
+ └── repos.json
+```
+
+You can also execute the script from Python as follows:
+
+```python
+import cmind
+output = cmind.access({'action':'run', 'automation':'script', 'tags':'hello-world,display,test'})
+if output['return'] == 0:
+ print(output)
+```
+
+If you discover that your new script is similar to an existing script in any CM repository, you can clone an existing script using the following command:
+
+```bash
+cm copy script <source_script> .:<target_script>
+```
+
+Here, `<source_script>` is the name of the existing script, and `<target_script>` is the name of the new script you're creating. Existing script names in the `cm4mlops` repository can be found [here](https://github.com/mlcommons/cm4mlops/tree/mlperf-inference/script).
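+
+For example, a hypothetical invocation (both script names are illustrative):
+
+```bash
+cm copy script print-hello-world .:my-hello-world
+```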
+
+## Caching and Reusing CM Script Outputs
+
+By default, CM scripts run in the current directory and record all new files there. For example, a universal download script might download an image to the current directory:
+
+```bash
+cm run script --tags=download,file,_wget --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+```
+
+To cache and reuse the output of scripts, CM offers a `cache` automation feature similar to `script`. When `"cache":true` is specified in a script's metadata, CM will create a `cache` directory in `$HOME/CM/repos/local` with a unique ID and the same tags as `script`, and execute the script there.
+
+Subsequent executions of the same script will reuse files from the cache, avoiding redundancy. This is especially useful for large files or data sets.
+
+You can manage cache entries and find specific ones using commands like:
+
+```bash
+cm show cache
+cm show cache --tags=get,ml-model,resnet50,_onnx
+cm find cache --tags=download,file,ml-model,resnet50,_onnx
+cm info cache --tags=download,file,ml-model,resnet50,_onnx
+```
+
+To clean cache entries:
+
+```bash
+cm rm cache --tags=ml-model,resnet50
+cm rm cache -f # Clean all entries
+```
+
+You can completely reset the CM framework by removing the `$HOME/CM` directory, which deletes all downloaded repositories and cached entries.
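+
+In other words (note that this removes everything CM has pulled or cached, so use with care):
+
+```bash
+rm -rf $HOME/CM
+```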
+
+## Integration with Containers
+
+CM scripts are designed to run natively or inside containers with the same commands. You can substitute `cm run script` with `cm docker script` to execute a script inside an automatically-generated container:
+
+```bash
+cm docker script --tags=python,app,image-classification,onnx,_cpu
+```
+
+CM automatically handles the generation of Dockerfiles, building of containers, and execution within containers, providing a seamless experience whether running scripts natively or in containers.
+
+This approach simplifies the development process by eliminating the need for separate Dockerfile maintenance and allows for the use of native scripts and workflows directly within containers.
diff --git a/cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg b/cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg
new file mode 100644
index 0000000000..fb655c6278
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg
@@ -0,0 +1,6 @@
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/docs/img/pages (80).png b/cmx4mlops/cmx4mlops/repo/docs/img/pages (80).png
new file mode 100644
index 0000000000..0ca65735a7
Binary files /dev/null and b/cmx4mlops/cmx4mlops/repo/docs/img/pages (80).png differ
diff --git a/cmx4mlops/cmx4mlops/repo/docs/index.md b/cmx4mlops/cmx4mlops/repo/docs/index.md
new file mode 100644
index 0000000000..9a74cd2b34
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/index.md
@@ -0,0 +1,79 @@
+# CM "script" automation specification
+
+Please check the [CM documentation](https://docs.mlcommons.org/ck) for more details about the CM automation language.
+
+See the [automatically generated catalog](scripts/index.md) of all CM scripts from MLCommons.
+
+## Understanding CM scripts
+
+* A CM script is identified by a set of tags and by a unique ID.
+* Further, each CM script can have multiple variations, which are identified by variation tags: these are treated in the same way as tags but carry a `_` prefix.
+
+### CM script execution flow
+
+
+* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first; any scripts listed under `deps` are executed in order.
+* Once all the `deps` scripts are executed, the `customize.py` file is checked and, if it exists, the `preprocess` function inside it is executed (if present).
+* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* After this, the keys in the `env` dictionary are exported as `ENV` variables, and the `run` file, if it exists, is executed.
+* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* Then the `postprocess` function inside `customize.py` is executed, if present.
+* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed.
+
+**Note:** if a script is already cached, the `preprocess`, run file and `postprocess` executions won't happen; only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`.
+
+### Input flags
+When we run a CM script, we can also pass inputs to it; any input listed in the `input_mapping` dictionary inside `_cm.json` is converted to the corresponding `ENV` variable, as shown below.
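+
+For example (a sketch; `--input` is automatically mapped to `CM_INPUT`, as noted in the special env keys below):
+
+```bash
+cm run script --tags=detect,os --input=/tmp/some/file
+# inside the script, the environment variable CM_INPUT is set to /tmp/some/file
+```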
+
+### Conditional execution of any `deps`, `post_deps`
+We can use the `skip_if_env` (or `enable_if_env`) dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
+
+### Versions
+We can request a specific version of a script using `version`; `version_min` and `version_max` are also possible options (see the usage sketch after the notes below).
+
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and above `version_min`) will be used for installation; otherwise `version_min` will be used as `version`.
+
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and below `version_max`) will be used for installation; otherwise `version_max_usable` (an additional input needed alongside `version_max`) will be used as `version`.
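+
+A usage sketch (the script tags and version numbers are illustrative):
+
+```bash
+cm run script --tags=get,python --version_min=3.8.0
+cm run script --tags=get,python --version_max=3.10.0 --version_max_usable=3.10.0
+```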
+
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies, specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has tags `"get,myscript"`, to call the variation `"test"` inside it we have to use the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key that maps variations into a group; at any time only one variation from a group can be used in the variation tags. For example, both `cpu` and `cuda` can be variations under the `device` group, but a user can use either `cpu` or `cuda` as a variation tag at any time, not both.
+
+#### Dynamic variations
+Sometimes it is difficult to add all the variations needed for a script; for example, `batch_size` can take many different values. To handle this case, we support dynamic variations using `#`, where `#` can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
+
+### ENV flow during CM script execution
+
+
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (which includes all dependent script executions as well), newly created keys and any updated keys are merged with `saved_env`, provided the keys are mentioned in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
+
+#### Special env keys
+* Any env key with the prefix `CM_TMP_` or `CM_GIT_` is not passed by default to any dependency. These can be force-passed by adding the key(s) to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent any env key from being passed to a given dependency by adding the prefix of the key to the `clean_env_keys` list of the concerned dependency.
+* `--input` is automatically converted to the `CM_INPUT` env key.
+* `version` is converted to `CM_VERSION`, `version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`.
+* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set, then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
+* If `env['CM_GIT_SSH']=yes`, then git URLs are changed from HTTPS to SSH.
+
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
+
+### How caching works
+* If `cache=true` is set in a script's meta, the result of the script execution is cached for further use.
+* For a cached script, `env` and `state` updates are done using the `new_env` and `new_state` dictionaries, which are stored in the `cm-cached.json` file inside the cached folder.
+* By using the `--new` input, a new cache entry can be forced even when an old one exists (see the example below).
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
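+
+For example, to force a fresh run of an otherwise cached script (illustrative tags):
+
+```bash
+cm run script --tags=detect,os --new
+```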
+
+
+Please see the [getting-started guide](getting-started.md) to try CM scripts.
+
+
+
+
+© 2022-24 [MLCommons](https://mlcommons.org)
+
diff --git a/cmx4mlops/cmx4mlops/repo/docs/requirements.txt b/cmx4mlops/cmx4mlops/repo/docs/requirements.txt
new file mode 100644
index 0000000000..ee5149cfc6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/requirements.txt
@@ -0,0 +1,6 @@
+mkdocs-material
+swagger-markdown
+mkdocs-macros-plugin
+ruamel.yaml
+slugify
+mkdocs-caseinsensitive-plugin
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md
new file mode 100644
index 0000000000..f707f1f85c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md
@@ -0,0 +1,86 @@
+# get-croissant
+Automatically generated README for this automation recipe: **get-croissant**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlcommons croissant" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlcommons,croissant
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlcommons croissant "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlcommons,croissant',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlcommons croissant"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/run.bat)
+___
+#### Script output
+```bash
+cmr "get mlcommons croissant " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md
new file mode 100644
index 0000000000..f74ec73ef5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md
@@ -0,0 +1,119 @@
+# get-dataset-cifar10
+Automatically generated README for this automation recipe: **get-dataset-cifar10**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset cifar10 image-classification validation training" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,cifar10,image-classification,validation,training[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset cifar10 image-classification validation training [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,cifar10,image-classification,validation,training',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset cifar10 image-classification validation training[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_tiny`
+ - ENV variables:
+ - CM_DATASET_CONVERT_TO_TINYMLPERF: `yes`
+
+
+
+
+ * Group "**data_format**"
+
+ Click here to expand this section.
+
+ * **`_python`** (default)
+ - ENV variables:
+ - CM_DATASET: `CIFAR10`
+ - CM_DATASET_FILENAME: `cifar-10-python.tar.gz`
+ - CM_DATASET_FILENAME1: `cifar-10-python.tar`
+ - CM_DATASET_CIFAR10: `https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz`
+
+
+
+
+ ##### Default variations
+
+ `_python`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/run.bat)
+___
+#### Script output
+```bash
+cmr "get dataset cifar10 image-classification validation training [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md
new file mode 100644
index 0000000000..22ae3381aa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md
@@ -0,0 +1,128 @@
+# get-dataset-cnndm
+Automatically generated README for this automation recipe: **get-dataset-cnndm**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset gpt-j cnndm cnn-dailymail original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,gpt-j,cnndm,cnn-dailymail,original[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,gpt-j,cnndm,cnn-dailymail,original',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset gpt-j cnndm cnn-dailymail original[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_intel`
+
+
+
+
+ * Group "**dataset-type**"
+
+ Click here to expand this section.
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `yes`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `no`
+
+
+
+
+ ##### Default variations
+
+ `_validation`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_DATASET_CALIBRATION: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-intel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/run-intel.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md
new file mode 100644
index 0000000000..98c9f978ea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md
@@ -0,0 +1,140 @@
+# get-dataset-coco
+Automatically generated README for this automation recipe: **get-dataset-coco**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset object-detection coco" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,object-detection,coco[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset object-detection coco [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,object-detection,coco',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset object-detection coco[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**size**"
+
+ Click here to expand this section.
+
+ * **`_complete`** (default)
+ - ENV variables:
+ - CM_DATASET_COCO_SIZE: `complete`
+ * `_small`
+ - ENV variables:
+ - CM_DATASET_COCO_SIZE: `small`
+
+
+
+
+ * Group "**type**"
+
+ Click here to expand this section.
+
+ * `_train`
+ - ENV variables:
+ - CM_DATASET_COCO_TYPE: `train`
+ * **`_val`** (default)
+ - ENV variables:
+ - CM_DATASET_COCO_TYPE: `val`
+
+
+
+
+ * Group "**version**"
+
+ Click here to expand this section.
+
+ * **`_2017`** (default)
+ - ENV variables:
+ - CM_DATASET_COCO_VERSION: `2017`
+
+
+
+
+ ##### Default variations
+
+ `_2017,_complete,_val`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--from=value` → `CM_FROM=value`
+ * `--home=value` → `CM_HOME_DIR=value`
+ * `--store=value` → `CM_STORE=value`
+ * `--to=value` → `CM_TO=value`
+
+
+
+
+___
+#### Script output
+```bash
+cmr "get dataset object-detection coco [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md
new file mode 100644
index 0000000000..23e09b06fb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md
@@ -0,0 +1,152 @@
+# get-dataset-coco2014
+Automatically generated README for this automation recipe: **get-dataset-coco2014**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset coco2014 object-detection original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,coco2014,object-detection,original[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset coco2014 object-detection original [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,coco2014,object-detection,original',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset coco2014 object-detection original[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**annotations**"
+
+ Click here to expand this section.
+
+ * `_custom-annotations`
+ - ENV variables:
+ - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: `yes`
+ * **`_default-annotations`** (default)
+ - ENV variables:
+ - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: `no`
+
+
+
+
+ * Group "**dataset-type**"
+
+ Click here to expand this section.
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `yes`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `no`
+
+
+
+
+ * Group "**size**"
+
+ Click here to expand this section.
+
+ * **`_50`** (default)
+ - ENV variables:
+ - CM_DATASET_SIZE: `50`
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: ``
+ * `_size.#`
+ - ENV variables:
+ - CM_DATASET_SIZE: `#`
+
+
+
+
+ ##### Default variations
+
+ `_50,_default-annotations,_validation`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_DATASET_CALIBRATION: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/run.bat)
+___
+#### Script output
+```bash
+cmr "get dataset coco2014 object-detection original [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md
new file mode 100644
index 0000000000..f28c6e10d9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md
@@ -0,0 +1,124 @@
+# get-dataset-criteo
+Automatically generated README for this automation recipe: **get-dataset-criteo**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset criteo original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,criteo,original[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset criteo original [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,criteo,original',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset criteo original[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_backup`
+ - ENV variables:
+ - CM_BACKUP_ZIPS: `yes`
+ * `_fake`
+ - ENV variables:
+ - CM_CRITEO_FAKE: `yes`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--criteo_path=value` → `CM_CRITEO_PATH=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_BACKUP_ZIPS: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset criteo original [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md
new file mode 100644
index 0000000000..1abab6599e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md
@@ -0,0 +1,119 @@
+# get-dataset-imagenet-aux
+Automatically generated README for this automation recipe: **get-dataset-imagenet-aux**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get aux dataset-aux image-classification imagenet-aux" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,aux,dataset-aux,image-classification,imagenet-aux[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get aux dataset-aux image-classification imagenet-aux [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,aux,dataset-aux,image-classification,imagenet-aux',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get aux dataset-aux image-classification imagenet-aux[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_2012`
+ - ENV variables:
+ - CM_DATASET_AUX_VER: `2012`
+
+
+
+
+ * Group "**download-source**"
+
+ Click here to expand this section.
+
+ * `_from.berkeleyvision`
+ - ENV variables:
+ - CM_WGET_URL: `http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz`
+ * **`_from.dropbox`** (default)
+ - ENV variables:
+ - CM_WGET_URL: `https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz`
+
+
+
+
+ ##### Default variations
+
+ `_from.dropbox`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/run.bat)
+___
+#### Script output
+```bash
+cmr "get aux dataset-aux image-classification imagenet-aux [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md
new file mode 100644
index 0000000000..7aae04d884
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md
@@ -0,0 +1,104 @@
+# get-dataset-imagenet-calibration
+Automatically generated README for this automation recipe: **get-dataset-imagenet-calibration**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-calibration/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset imagenet calibration" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,imagenet,calibration[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset imagenet calibration [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,imagenet,calibration',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset imagenet calibration[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**calibration-option**"
+
+ Click here to expand this section.
+
+ * **`_mlperf.option1`** (default)
+ - ENV variables:
+ - CM_MLPERF_IMAGENET_CALIBRATION_OPTION: `one`
+ - CM_DOWNLOAD_CHECKSUM: `f09719174af3553119e2c621157773a6`
+ * `_mlperf.option2`
+ - ENV variables:
+ - CM_MLPERF_IMAGENET_CALIBRATION_OPTION: `two`
+ - CM_DOWNLOAD_CHECKSUM: `e44582af00e3b4fc3fac30efd6bdd05f`
+
+
+
+
+ ##### Default variations
+
+ `_mlperf.option1`
+
+___
+#### Script output
+```bash
+cmr "get dataset imagenet calibration [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md
new file mode 100644
index 0000000000..48b39fa405
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md
@@ -0,0 +1,80 @@
+# get-dataset-imagenet-helper
+Automatically generated README for this automation recipe: **get-dataset-imagenet-helper**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-helper/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get imagenet helper imagenet-helper" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,imagenet,helper,imagenet-helper
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get imagenet helper imagenet-helper "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,imagenet,helper,imagenet-helper',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get imagenet helper imagenet-helper"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get imagenet helper imagenet-helper " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md
new file mode 100644
index 0000000000..2b8bb952fc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md
@@ -0,0 +1,96 @@
+# get-dataset-imagenet-train
+Automatically generated README for this automation recipe: **get-dataset-imagenet-train**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-train/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get imagenet train dataset original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,imagenet,train,dataset,original [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get imagenet train dataset original " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,imagenet,train,dataset,original',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get imagenet train dataset original" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `IMAGENET_TRAIN_PATH=value`
+ * `--torrent=value` → `CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH=value`
+
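+ For example, to register an already-downloaded copy of the ImageNet training set (the path below is hypothetical; substitute your own):
+
+ ```bash
+ cmr "get imagenet train dataset original" --input=/data/imagenet/train -j
+ ```
+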
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-train/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get imagenet train dataset original " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md
new file mode 100644
index 0000000000..d9cd7b787f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md
@@ -0,0 +1,149 @@
+# get-dataset-imagenet-val
+Automatically generated README for this automation recipe: **get-dataset-imagenet-val**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get val validation dataset imagenet ILSVRC image-classification original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,val,validation,dataset,imagenet,ILSVRC,image-classification,original',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get val validation dataset imagenet ILSVRC image-classification original[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_2012-500`
+ * `_2012-full`
+ * `_run-during-docker-build`
+
+
+
+
+ * Group "**count**"
+
+
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: `50000`
+ - CM_IMAGENET_FULL: `yes`
+ - CM_DAE_FILENAME: `ILSVRC2012_img_val.tar`
+ - CM_DAE_DOWNLOADED_CHECKSUM: `29b22e2961454d5413ddabcf34fc5622`
+ * `_size.#`
+ - ENV variables:
+ - CM_DATASET_SIZE: `#`
+ * **`_size.500`** (default)
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ - CM_DAE_FILENAME: `ILSVRC2012_img_val_500.tar`
+ - CM_DAE_URL: `http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar`
+
+
+
+
+ * Group "**dataset-version**"
+
+
+ * **`_2012`** (default)
+ - ENV variables:
+ - CM_DATASET_VER: `2012`
+
+
+
+
+ ##### Default variations
+
+ `_2012,_size.500`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--imagenet_path=value` → `IMAGENET_PATH=value`
+ * `--torrent=value` → `CM_DATASET_IMAGENET_VAL_TORRENT_PATH=value`
+
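+ For example, to register a pre-downloaded full validation set instead of the default 500-image subset (a sketch; the path is hypothetical):
+
+ ```bash
+ cmr "get val validation dataset imagenet ILSVRC image-classification original _full" --imagenet_path=/data/imagenet/val -j
+ ```
+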
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ No run file exists for Linux/macOS
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/run.bat)
+___
+#### Script output
+```bash
+cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md
new file mode 100644
index 0000000000..5010afffca
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md
@@ -0,0 +1,138 @@
+# get-dataset-kits19
+Automatically generated README for this automation recipe: **get-dataset-kits19**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-kits19/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset medical-imaging kits original kits19" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,medical-imaging,kits,original,kits19[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset medical-imaging kits original kits19 [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,medical-imaging,kits,original,kits19',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset medical-imaging kits original kits19[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `yes`
+ * `_default`
+ - ENV variables:
+ - CM_GIT_PATCH: `no`
+ * `_full-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: ``
+ * `_no-recurse-submodules`
+ - ENV variables:
+ - CM_GIT_RECURSE_SUBMODULES: ``
+ * `_patch`
+ - ENV variables:
+ - CM_GIT_PATCH: `yes`
+ * `_short-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 5`
+ * `_validation`
+ - ENV variables:
+ - CM_DATASET_VALIDATION: `yes`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_GIT_CHECKOUT: `master`
+ * CM_GIT_DEPTH: `--depth 2`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ``
+ * CM_GIT_URL: `https://github.com/neheller/kits19`
+
+
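+ For example, to clone with a deeper git history than the default (a sketch using the `--env.KEY=VALUE` mechanism described above):
+
+ ```bash
+ cm run script --tags=get,dataset,medical-imaging,kits,original,kits19 --env.CM_GIT_DEPTH="--depth 10"
+ ```
+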
+#### Versions
+Default version: `master`
+
+* `custom`
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-kits19/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset medical-imaging kits original kits19 [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md
new file mode 100644
index 0000000000..05be625ada
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md
@@ -0,0 +1,97 @@
+# get-dataset-librispeech
+Automatically generated README for this automation recipe: **get-dataset-librispeech**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset speech speech-recognition librispeech validation audio training original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset speech speech-recognition librispeech validation audio training original "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset speech speech-recognition librispeech validation audio training original"
+ ```
+___
+
+#### Versions
+Default version: `dev-clean`
+
+* `dev-clean`
+* `dev-other`
+* `test-clean`
+* `test-other`
+* `train-clean-100`
+* `train-clean-360`
+* `train-other-500`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset speech speech-recognition librispeech validation audio training original " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md
new file mode 100644
index 0000000000..05578105ca
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md
@@ -0,0 +1,105 @@
+# get-dataset-openimages-annotations
+Automatically generated README for this automation recipe: **get-dataset-openimages-annotations**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-annotations/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get aux dataset-aux object-detection openimages annotations" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,aux,dataset-aux,object-detection,openimages,annotations[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get aux dataset-aux object-detection openimages annotations [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,aux,dataset-aux,object-detection,openimages,annotations',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get aux dataset-aux object-detection openimages annotations[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**download-source**"
+
+
+ * **`_from.github`** (default)
+ - ENV variables:
+ - CM_WGET_URL: `https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip`
+
+
+
+
+ ##### Default variations
+
+ `_from.github`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-annotations/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get aux dataset-aux object-detection openimages annotations [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md
new file mode 100644
index 0000000000..6e634f401f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md
@@ -0,0 +1,131 @@
+# get-dataset-openimages-calibration
+Automatically generated README for this automation recipe: **get-dataset-openimages-calibration**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-calibration/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openimages calibration" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,openimages,calibration[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset openimages calibration [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,openimages,calibration',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset openimages calibration[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_filter`
+ - ENV variables:
+ - CM_CALIBRATE_FILTER: `yes`
+
+
+
+
+ * Group "**calibration-option**"
+
+
+ * **`_mlperf.option1`** (default)
+ - ENV variables:
+ - CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: `one`
+ - CM_DOWNLOAD_CHECKSUM1: `f09719174af3553119e2c621157773a6`
+
+
+
+
+ * Group "**filter-size**"
+
+
+ * `_filter-size.#`
+ - ENV variables:
+ - CM_CALIBRATION_FILTER_SIZE: `#`
+ * `_filter-size.400`
+ - ENV variables:
+ - CM_CALIBRATION_FILTER_SIZE: `400`
+
+
+
+
+ ##### Default variations
+
+ `_mlperf.option1`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-filter.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-calibration/run-filter.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset openimages calibration [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md
new file mode 100644
index 0000000000..5c9e2fa595
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md
@@ -0,0 +1,164 @@
+# get-dataset-openimages
+Automatically generated README for this automation recipe: **get-dataset-openimages**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openimages open-images object-detection original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,openimages,open-images,object-detection,original[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset openimages open-images object-detection original [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,openimages,open-images,object-detection,original',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset openimages open-images object-detection original[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_filter`
+ * `_filter-size.#`
+ * `_using-fiftyone`
+
+
+
+
+ * Group "**annotations**"
+
+
+ * `_custom-annotations`
+ - ENV variables:
+ - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: `yes`
+ * **`_default-annotations`** (default)
+ - ENV variables:
+ - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: `no`
+
+
+
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `yes`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `no`
+
+
+
+
+ * Group "**size**"
+
+
+ * **`_50`** (default)
+ - ENV variables:
+ - CM_DATASET_SIZE: `50`
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: ``
+ * `_size.#`
+ - ENV variables:
+ - CM_DATASET_SIZE: `#`
+
+
+
+
+ ##### Default variations
+
+ `_50,_default-annotations,_validation`
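+
+ For instance, to fetch the 500-image validation split while keeping the default annotations (a sketch combining the group variations above):
+
+ ```bash
+ cm run script --tags=get,dataset,openimages,open-images,object-detection,original,_500
+ ```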
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DATASET_CALIBRATION: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/run.bat)
+___
+#### Script output
+```bash
+cmr "get dataset openimages open-images object-detection original [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md
new file mode 100644
index 0000000000..a437ae42cb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md
@@ -0,0 +1,132 @@
+# get-dataset-openorca
+Automatically generated README for this automation recipe: **get-dataset-openorca**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openorca/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openorca language-processing original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,openorca,language-processing,original[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset openorca language-processing original [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,openorca,language-processing,original',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset openorca language-processing original[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `yes`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `no`
+
+
+
+
+ * Group "**size**"
+
+
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * **`_60`** (default)
+ - ENV variables:
+ - CM_DATASET_SIZE: `60`
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: `24576`
+ * `_size.#`
+ - ENV variables:
+ - CM_DATASET_SIZE: `#`
+
+
+
+
+ ##### Default variations
+
+ `_60,_validation`
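+
+ For instance, to fetch the full 24,576-sample validation set instead of the default 60 samples (a sketch based on the size group above):
+
+ ```bash
+ cm run script --tags=get,dataset,openorca,language-processing,original,_full
+ ```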
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DATASET_CALIBRATION: `no`
+
+
+
+___
+#### Script output
+```bash
+cmr "get dataset openorca language-processing original [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md
new file mode 100644
index 0000000000..30e0fbeeee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md
@@ -0,0 +1,105 @@
+# get-dataset-squad-vocab
+Automatically generated README for this automation recipe: **get-dataset-squad-vocab**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad-vocab/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get aux dataset-aux language-processing squad-aux vocab squad-vocab[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**download-source**"
+
+
+ * **`_from.zenodo`** (default)
+ - ENV variables:
+ - CM_WGET_URL: `https://zenodo.org/record/3733868/files/vocab.txt`
+
+
+
+
+ ##### Default variations
+
+ `_from.zenodo`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad-vocab/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md
new file mode 100644
index 0000000000..554e79a575
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md
@@ -0,0 +1,92 @@
+# get-dataset-squad
+Automatically generated README for this automation recipe: **get-dataset-squad**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset squad language-processing validation original" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,squad,language-processing,validation,original
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset squad language-processing validation original "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,squad,language-processing,validation,original',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset squad language-processing validation original"
+ ```
+___
+
+#### Versions
+Default version: `1.1`
+
+* `1.1`
+* `2.0`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset squad language-processing validation original " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md
new file mode 100644
index 0000000000..c75f70bbff
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md
@@ -0,0 +1,137 @@
+# get-preprocessed-dataset-criteo
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-criteo**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset criteo recommendation dlrm preprocessed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,criteo,recommendation,dlrm,preprocessed',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset criteo recommendation dlrm preprocessed[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_1`
+ - ENV variables:
+ - CM_DATASET_SIZE: `1`
+ * `_50`
+ - ENV variables:
+ - CM_DATASET_SIZE: `50`
+ * `_fake`
+ - ENV variables:
+ - CM_CRITEO_FAKE: `yes`
+ * `_full`
+ * `_validation`
+
+
+
+
+ * Group "**type**"
+
+
+ * **`_multihot`** (default)
+ - ENV variables:
+ - CM_DATASET_CRITEO_MULTIHOT: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_multihot`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+ * `--output_dir=value` → `CM_DATASET_PREPROCESSED_OUTPUT_PATH=value`
+ * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
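+ For example, to preprocess the fake Criteo variant into a custom directory with 8 threads (a sketch combining the `_fake` variation with the flags above; the path is hypothetical):
+
+ ```bash
+ cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed,_fake --output_dir=/data/criteo/preprocessed --threads=8
+ ```
+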
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-multihot.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/run-multihot.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md
new file mode 100644
index 0000000000..844e2c2e8c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md
@@ -0,0 +1,80 @@
+# get-preprocesser-script-generic
+Automatically generated README for this automation recipe: **get-preprocesser-script-generic**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocesser-script-generic/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get preprocessor generic image-preprocessor script" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,preprocessor,generic,image-preprocessor,script
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get preprocessor generic image-preprocessor script "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,preprocessor,generic,image-preprocessor,script',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get preprocessor generic image-preprocessor script"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get preprocessor generic image-preprocessor script " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md
new file mode 100644
index 0000000000..c4bee08bce
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md
@@ -0,0 +1,301 @@
+# get-preprocessed-dataset-imagenet
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-imagenet**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset imagenet ILSVRC image-classification preprocessed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,imagenet,ILSVRC,image-classification,preprocessed',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset imagenet ILSVRC image-classification preprocessed[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_default`
+ * `_pytorch`
+ - ENV variables:
+ - CM_PREPROCESS_PYTORCH: `yes`
+ - CM_MODEL: `resnet50`
+ * `_tflite_tpu`
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ - CM_PREPROCESS_TFLITE_TPU: `yes`
+
+
+
+
+ * Group "**calibration-option**"
+
+
+ * `_mlperf.option1`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION_OPTION: `one`
+ * `_mlperf.option2`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION_OPTION: `two`
+
+
+
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_TYPE: `calibration`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_TYPE: `validation`
+
+
+
+
+ * Group "**extension**"
+
+
+ * `_rgb32`
+ - ENV variables:
+ - CM_DATASET_PREPROCESSED_EXTENSION: `rgb32`
+ * `_rgb8`
+ - ENV variables:
+ - CM_DATASET_PREPROCESSED_EXTENSION: `rgb8`
+
+
+
+
+ * Group "**interpolation-method**"
+
+
+ * `_inter.area`
+ - ENV variables:
+ - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA`
+ * `_inter.linear`
+ - ENV variables:
+ - CM_DATASET_INTERPOLATION_METHOD: `INTER_LINEAR`
+
+
+
+
+ * Group "**layout**"
+
+
+ * **`_NCHW`** (default)
+ - ENV variables:
+ - CM_DATASET_DATA_LAYOUT: `NCHW`
+ * `_NHWC`
+ - ENV variables:
+ - CM_DATASET_DATA_LAYOUT: `NHWC`
+
+
+
+
+ * Group "**model**"
+
+
+ * `_for.mobilenet`
+ * `_for.resnet50`
+ - ENV variables:
+ - CM_DATASET_SUBTRACT_MEANS: `1`
+ - CM_DATASET_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
+ - CM_DATASET_NORMALIZE_DATA: `0`
+ - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA`
+
+
+
+
+ * Group "**precision**"
+
+
+ * `_float32`
+ - ENV variables:
+ - CM_DATASET_DATA_TYPE: `float32`
+ - CM_DATASET_QUANTIZE: `0`
+ - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
+ * `_int8`
+ - ENV variables:
+ - CM_DATASET_DATA_TYPE: `int8`
+ - CM_DATASET_QUANTIZE: `1`
+ - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
+ * `_uint8`
+ - ENV variables:
+ - CM_DATASET_DATA_TYPE: `uint8`
+ - CM_DATASET_DATA_TYPE_INPUT: `float32`
+ - CM_DATASET_QUANTIZE: `1`
+ - CM_DATASET_CONVERT_TO_UNSIGNED: `1`
+
+
+
+
+ * Group "**preprocessing-source**"
+
+
+ * `_generic-preprocessor`
+ - ENV variables:
+ - CM_DATASET_REFERENCE_PREPROCESSOR: `0`
+ * **`_mlcommons-reference-preprocessor`** (default)
+ - ENV variables:
+ - CM_DATASET_REFERENCE_PREPROCESSOR: `1`
+
+
+
+
+ * Group "**resolution**"
+
+
+ * `_resolution.#`
+ - ENV variables:
+ - CM_DATASET_INPUT_SQUARE_SIDE: `#`
+ * **`_resolution.224`** (default)
+ - ENV variables:
+ - CM_DATASET_INPUT_SQUARE_SIDE: `224`
+
+
+
+
+ * Group "**size**"
+
+
+ * `_1`
+ - ENV variables:
+ - CM_DATASET_SIZE: `1`
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: `50000`
+ * `_size.#`
+ - ENV variables:
+ - CM_DATASET_SIZE: `#`
+
+
+
+
+ ##### Default variations
+
+ `_NCHW,_mlcommons-reference-preprocessor,_resolution.224,_validation`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+ * `--imagenet_path=value` → `CM_IMAGENET_PATH=value`
+ * `--imagenet_preprocessed_path=value` → `CM_IMAGENET_PREPROCESSED_PATH=value`
+ * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
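+ For example, to preprocess a local ImageNet copy as NHWC uint8 tensors for ResNet50 (a sketch combining the variations above with these flags; the path is hypothetical):
+
+ ```bash
+ cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed,_NHWC,_uint8,_for.resnet50 --imagenet_path=/data/imagenet/val --threads=8
+ ```
+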
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DATASET_CROP_FACTOR: `87.5`
+ * CM_DATASET_DATA_TYPE: `float32`
+ * CM_DATASET_DATA_LAYOUT: `NCHW`
+ * CM_DATASET_QUANT_SCALE: `1`
+ * CM_DATASET_QUANTIZE: `0`
+ * CM_DATASET_QUANT_OFFSET: `0`
+ * CM_DATASET_PREPROCESSED_EXTENSION: `npy`
+ * CM_DATASET_CONVERT_TO_UNSIGNED: `0`
+ * CM_DATASET_REFERENCE_PREPROCESSOR: `1`
+ * CM_PREPROCESS_VGG: `yes`
+ * CM_MODEL: `resnet50`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/run.bat)
+___
+#### Script output
+```bash
+cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md
new file mode 100644
index 0000000000..d2a985eca5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md
@@ -0,0 +1,175 @@
+# get-preprocessed-dataset-kits19
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-kits19**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-kits19/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset medical-imaging kits19 preprocessed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,medical-imaging,kits19,preprocessed',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset medical-imaging kits19 preprocessed[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_nvidia`
+ - ENV variables:
+ - CM_PREPROCESSING_BY_NVIDIA: `yes`
+
+
+
+
+ * Group "**dataset-count**"
+
+
+ * `_1`
+ - ENV variables:
+ - CM_DATASET_SIZE: `1`
+ * `_5`
+ - ENV variables:
+ - CM_DATASET_SIZE: `5`
+ * `_50`
+ - ENV variables:
+ - CM_DATASET_SIZE: `50`
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: ``
+
+
+
+
+ * Group "**dataset-precision**"
+
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_DATASET_DTYPE: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_DATASET_DTYPE: `int8`
+
+
+
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_PATH: `<<>>`
+ * **`_validation`** (default)
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_validation`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+ * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
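+ For example, to preprocess 50 validation cases at int8 precision into a custom directory (a sketch; the path is hypothetical):
+
+ ```bash
+ cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed,_int8,_50 --dir=/data/kits19/preprocessed
+ ```
+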
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DATASET: `kits19`
+ * CM_DATASET_DTYPE: `fp32`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-kits19/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md
new file mode 100644
index 0000000000..f683a8f528
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md
@@ -0,0 +1,164 @@
+# get-preprocessed-dataset-librispeech
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-librispeech**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-librispeech/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset speech-recognition librispeech preprocessed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,speech-recognition,librispeech,preprocessed',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset speech-recognition librispeech preprocessed[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**dataset-count**"
+
+
+ * `_1`
+ - ENV variables:
+ - CM_DATASET_SIZE: `1`
+ * `_5`
+ - ENV variables:
+ - CM_DATASET_SIZE: `5`
+ * `_50`
+ - ENV variables:
+ - CM_DATASET_SIZE: `50`
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * `_full`
+ - ENV variables:
+ - CM_DATASET_SIZE: ``
+
+
+
+
+ * Group "**dataset-precision**"
+
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_DATASET_DTYPE: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_DATASET_DTYPE: `int8`
+
+
+
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_PATH: `<<>>`
+ * **`_validation`** (default)
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_validation`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+ * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
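+ For example, to preprocess 500 samples using 4 threads (a sketch based on the dataset-count variations above):
+
+ ```bash
+ cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed,_500 --threads=4
+ ```
+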
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DATASET: `librispeech`
+ * CM_DATASET_DTYPE: `fp32`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-librispeech/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md
new file mode 100644
index 0000000000..9bbe30eec1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md
@@ -0,0 +1,287 @@
+# get-preprocessed-dataset-openimages
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-openimages**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openimages open-images object-detection preprocessed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,openimages,open-images,object-detection,preprocessed[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,dataset,openimages,open-images,object-detection,preprocessed',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset openimages open-images object-detection preprocessed[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_filter`
+ * `_for.retinanet.onnx`
+ - ENV variables:
+ - CM_ML_MODEL_NAME: `retinanet`
+ - CM_DATASET_SUBTRACT_MEANS: `1`
+ - CM_DATASET_GIVEN_CHANNEL_MEANS: `0.485 0.456 0.406`
+ - CM_DATASET_GIVEN_CHANNEL_STDS: `0.229 0.224 0.225`
+ - CM_DATASET_NORMALIZE_DATA: `0`
+ - CM_DATASET_NORMALIZE_LOWER: `0.0`
+ - CM_DATASET_NORMALIZE_UPPER: `1.0`
+ - CM_DATASET_CONVERT_TO_BGR: `0`
+ - CM_DATASET_CROP_FACTOR: `100.0`
+ * `_nvidia`
+ - ENV variables:
+ - CM_PREPROCESSING_BY_NVIDIA: `yes`
+ * `_quant-offset.#`
+ * `_quant-scale.#`
+
+
+
+
+ * Group "**annotations**"
+
+
+ * `_custom-annotations`
+ * **`_default-annotations`** (default)
+
+
+
+
+ * Group "**dataset-count**"
+
+
+ * **`_50`** (default)
+ - ENV variables:
+ - CM_DATASET_SIZE: `50`
+ * `_500`
+ - ENV variables:
+ - CM_DATASET_SIZE: `500`
+ * `_full`
+ * `_size.#`
+ - ENV variables:
+ - CM_DATASET_SIZE: `#`
+
+
+
+
+ * Group "**dataset-layout**"
+
+
+ * **`_NCHW`** (default)
+ - ENV variables:
+ - CM_DATASET_DATA_LAYOUT: `NCHW`
+ * `_NHWC`
+ - ENV variables:
+ - CM_DATASET_DATA_LAYOUT: `NHWC`
+
+
+
+
+ * Group "**dataset-precision**"
+
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_DATASET_DTYPE: `fp32`
+ - CM_DATASET_INPUT_DTYPE: `fp32`
+ - CM_DATASET_QUANTIZE: `0`
+ - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
+ * `_int8`
+ - ENV variables:
+ - CM_DATASET_DTYPE: `int8`
+ - CM_DATASET_INPUT_DTYPE: `fp32`
+ - CM_DATASET_QUANTIZE: `1`
+ - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
+ * `_uint8`
+ - ENV variables:
+ - CM_DATASET_DTYPE: `uint8`
+ - CM_DATASET_INPUT_DTYPE: `fp32`
+ - CM_DATASET_QUANTIZE: `1`
+ - CM_DATASET_CONVERT_TO_UNSIGNED: `1`
+
+
+
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_PATH: `<<>>`
+ - CM_DATASET_ANNOTATIONS_FILE_PATH: `<<>>`
+ - CM_DATASET_TYPE: `calibration`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_TYPE: `validation`
+
+
+
+
+ * Group "**extension**"
+
+
+ * `_npy`
+ - ENV variables:
+ - CM_DATASET_PREPROCESSED_EXTENSION: `npy`
+ * `_raw`
+ - ENV variables:
+ - CM_DATASET_PREPROCESSED_EXTENSION: `raw`
+ * `_rgb32`
+ - ENV variables:
+ - CM_DATASET_PREPROCESSED_EXTENSION: `rgb32`
+ * `_rgb8`
+ - ENV variables:
+ - CM_DATASET_PREPROCESSED_EXTENSION: `rgb8`
+
+
+
+
+ * Group "**filter-size**"
+
+
+ * `_filter-size.#`
+
+
+
+
+ * Group "**interpolation-method**"
+
+
+ * `_inter.area`
+ - ENV variables:
+ - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA`
+ * `_inter.linear`
+ - ENV variables:
+ - CM_DATASET_INTERPOLATION_METHOD: `INTER_LINEAR`
+
+
+
+
+ * Group "**preprocessing-source**"
+
+
+ * `_generic-preprocessor`
+ - ENV variables:
+ - CM_DATASET_REFERENCE_PREPROCESSOR: `0`
+ * **`_mlcommons-reference-preprocessor`** (default)
+ - ENV variables:
+ - CM_DATASET_REFERENCE_PREPROCESSOR: `1`
+
+
+
+
+ ##### Default variations
+
+ `_50,_NCHW,_default-annotations,_fp32,_mlcommons-reference-preprocessor,_validation`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value`
+ * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value`
+
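+    For example (a sketch; the path and thread count are illustrative placeholders):
+
+    ```bash
+    cmr "get dataset openimages open-images object-detection preprocessed" --dir=/data/openimages-preprocessed --threads=8
+    ```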
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_DATASET: `OPENIMAGES`
+ * CM_DATASET_DTYPE: `fp32`
+ * CM_DATASET_INPUT_SQUARE_SIDE: `800`
+ * CM_DATASET_CROP_FACTOR: `100.0`
+ * CM_DATASET_QUANT_SCALE: `1`
+ * CM_DATASET_QUANTIZE: `0`
+ * CM_DATASET_QUANT_OFFSET: `0`
+
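+    For example (a sketch; the value is illustrative), any of these defaults can be overridden on the command line:
+
+    ```bash
+    cmr "get dataset openimages open-images object-detection preprocessed" --env.CM_DATASET_INPUT_SQUARE_SIDE=640
+    ```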
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/run.bat)
+___
+#### Script output
+```bash
+cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md
new file mode 100644
index 0000000000..5232eaf726
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md
@@ -0,0 +1,129 @@
+# get-preprocessed-dataset-openorca
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-openorca**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openorca/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset openorca language-processing preprocessed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,openorca,language-processing,preprocessed[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset openorca language-processing preprocessed [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,dataset,openorca,language-processing,preprocessed',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset openorca language-processing preprocessed[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**dataset-type**"
+
+
+ * `_calibration`
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `yes`
+ * **`_validation`** (default)
+ - ENV variables:
+ - CM_DATASET_CALIBRATION: `no`
+
+
+
+
+ * Group "**size**"
+
+
+ * **`_60`** (default)
+ * `_full`
+ * `_size.#`
+
+
+
+
+ ##### Default variations
+
+ `_60,_validation`
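+
+    For example (a sketch; the size value is illustrative), the calibration subset with a custom size can be requested as:
+
+    ```bash
+    cmr "get dataset openorca language-processing preprocessed _calibration,_size.100"
+    ```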
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_DATASET_CALIBRATION: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openorca/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset openorca language-processing preprocessed [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md
new file mode 100644
index 0000000000..422bbd9116
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md
@@ -0,0 +1,165 @@
+# get-preprocessed-dataset-squad
+Automatically generated README for this automation recipe: **get-preprocessed-dataset-squad**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset preprocessed tokenized squad" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,dataset,preprocessed,tokenized,squad[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get dataset preprocessed tokenized squad [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,dataset,preprocessed,tokenized,squad',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get dataset preprocessed tokenized squad[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**calibration-set**"
+
+
+ * `_calib1`
+ - ENV variables:
+ - CM_DATASET_SQUAD_CALIBRATION_SET: `one`
+ * `_calib2`
+ - ENV variables:
+ - CM_DATASET_SQUAD_CALIBRATION_SET: `two`
+ * **`_no-calib`** (default)
+ - ENV variables:
+ - CM_DATASET_SQUAD_CALIBRATION_SET: ``
+
+
+
+
+ * Group "**doc-stride**"
+
+
+ * `_doc-stride.#`
+ - ENV variables:
+ - CM_DATASET_DOC_STRIDE: `#`
+ * **`_doc-stride.128`** (default)
+ - ENV variables:
+ - CM_DATASET_DOC_STRIDE: `128`
+
+
+
+
+ * Group "**packing**"
+
+
+ * `_packed`
+ - ENV variables:
+ - CM_DATASET_SQUAD_PACKED: `yes`
+
+
+
+
+ * Group "**raw**"
+
+
+ * `_pickle`
+ - ENV variables:
+ - CM_DATASET_RAW: `no`
+ * **`_raw`** (default)
+ - ENV variables:
+ - CM_DATASET_RAW: `yes`
+
+
+
+
+ * Group "**seq-length**"
+
+
+ * `_seq-length.#`
+ - ENV variables:
+ - CM_DATASET_MAX_SEQ_LENGTH: `#`
+ * **`_seq-length.384`** (default)
+ - ENV variables:
+ - CM_DATASET_MAX_SEQ_LENGTH: `384`
+
+
+
+
+ ##### Default variations
+
+ `_doc-stride.128,_no-calib,_raw,_seq-length.384`
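+
+    For example (a sketch; the values are illustrative), a packed, longer-sequence preprocessing can be requested as:
+
+    ```bash
+    cmr "get dataset preprocessed tokenized squad _packed,_seq-length.512"
+    ```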
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-packed.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/run-packed.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get dataset preprocessed tokenized squad [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md
new file mode 100644
index 0000000000..8e94f60731
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md
@@ -0,0 +1,27 @@
+* [get-croissant](get-croissant/index.md)
+* [get-dataset-cifar10](get-dataset-cifar10/index.md)
+* [get-dataset-cnndm](get-dataset-cnndm/index.md)
+* [get-dataset-coco](get-dataset-coco/index.md)
+* [get-dataset-coco2014](get-dataset-coco2014/index.md)
+* [get-dataset-criteo](get-dataset-criteo/index.md)
+* [get-dataset-imagenet-aux](get-dataset-imagenet-aux/index.md)
+* [get-dataset-imagenet-calibration](get-dataset-imagenet-calibration/index.md)
+* [get-dataset-imagenet-helper](get-dataset-imagenet-helper/index.md)
+* [get-dataset-imagenet-train](get-dataset-imagenet-train/index.md)
+* [get-dataset-imagenet-val](get-dataset-imagenet-val/index.md)
+* [get-dataset-kits19](get-dataset-kits19/index.md)
+* [get-dataset-librispeech](get-dataset-librispeech/index.md)
+* [get-dataset-openimages](get-dataset-openimages/index.md)
+* [get-dataset-openimages-annotations](get-dataset-openimages-annotations/index.md)
+* [get-dataset-openimages-calibration](get-dataset-openimages-calibration/index.md)
+* [get-dataset-openorca](get-dataset-openorca/index.md)
+* [get-dataset-squad](get-dataset-squad/index.md)
+* [get-dataset-squad-vocab](get-dataset-squad-vocab/index.md)
+* [get-preprocessed-dataset-criteo](get-preprocessed-dataset-criteo/index.md)
+* [get-preprocessed-dataset-imagenet](get-preprocessed-dataset-imagenet/index.md)
+* [get-preprocessed-dataset-kits19](get-preprocessed-dataset-kits19/index.md)
+* [get-preprocessed-dataset-librispeech](get-preprocessed-dataset-librispeech/index.md)
+* [get-preprocessed-dataset-openimages](get-preprocessed-dataset-openimages/index.md)
+* [get-preprocessed-dataset-openorca](get-preprocessed-dataset-openorca/index.md)
+* [get-preprocessed-dataset-squad](get-preprocessed-dataset-squad/index.md)
+* [get-preprocessed-dataset-generic](get-preprocessed-dataset-generic/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md
new file mode 100644
index 0000000000..d0a9d44361
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md
@@ -0,0 +1,89 @@
+# get-google-saxml
+Automatically generated README for this automation recipe: **get-google-saxml**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get google saxml" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,google,saxml
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get google saxml "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,google,saxml',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get google saxml"
+ ```
+___
+
+#### Versions
+Default version: `master`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/run.bat)
+___
+#### Script output
+```bash
+cmr "get google saxml " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md
new file mode 100644
index 0000000000..04e0b03807
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md
@@ -0,0 +1,111 @@
+# get-onnxruntime-prebuilt
+Automatically generated README for this automation recipe: **get-onnxruntime-prebuilt**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install onnxruntime get prebuilt lib lang-c lang-cpp" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install onnxruntime get prebuilt lib lang-c lang-cpp[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_ONNXRUNTIME_DEVICE: ``
+ * `_cuda`
+ - ENV variables:
+ - CM_ONNXRUNTIME_DEVICE: `gpu`
+
+
+
+
+ ##### Default variations
+
+ `_cpu`
+#### Versions
+Default version: `1.16.3`
+
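+For example (a sketch; it assumes the `--version` flag accepts any published release), the CUDA build of a specific version can be requested as:
+
+```bash
+cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp,_cuda --version=1.16.3
+```
+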
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/run.bat)
+___
+#### Script output
+```bash
+cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md
new file mode 100644
index 0000000000..613a95510a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md
@@ -0,0 +1,80 @@
+# get-qaic-apps-sdk
+Automatically generated README for this automation recipe: **get-qaic-apps-sdk**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-apps-sdk/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get detect qaic apps sdk apps-sdk qaic-apps-sdk"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md
new file mode 100644
index 0000000000..7a55997153
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md
@@ -0,0 +1,80 @@
+# get-qaic-platform-sdk
+Automatically generated README for this automation recipe: **get-qaic-platform-sdk**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-platform-sdk/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get detect qaic platform sdk platform-sdk qaic-platform-sdk"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md
new file mode 100644
index 0000000000..159dc0eddd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md
@@ -0,0 +1,119 @@
+# get-qaic-software-kit
+Automatically generated README for this automation recipe: **get-qaic-software-kit**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-software-kit/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get qaic software kit qaic-software-kit" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,qaic,software,kit,qaic-software-kit[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get qaic software kit qaic-software-kit [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,qaic,software,kit,qaic-software-kit',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get qaic software kit qaic-software-kit[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+
+
+
+
+ * Group "**repo-source**"
+
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.quic`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100`
+
+
+
+
+ ##### Default variations
+
+ `_repo.quic`
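+
+    For example (a sketch; the branch name is illustrative), a specific branch of the default repository can be checked out as:
+
+    ```bash
+    cmr "get qaic software kit qaic-software-kit _branch.main"
+    ```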
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-software-kit/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get qaic software kit qaic-software-kit [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md
new file mode 100644
index 0000000000..c316892542
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md
@@ -0,0 +1,86 @@
+# get-rocm
+Automatically generated README for this automation recipe: **get-rocm**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rocm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get rocm get-rocm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,rocm,get-rocm
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get rocm get-rocm "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,rocm,get-rocm',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get rocm get-rocm"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rocm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get rocm get-rocm " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md
new file mode 100644
index 0000000000..da93153260
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md
@@ -0,0 +1,141 @@
+# get-tvm
+Automatically generated README for this automation recipe: **get-tvm**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get tvm get-tvm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,tvm,get-tvm[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get tvm get-tvm [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,tvm,get-tvm',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get tvm get-tvm[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_cuda`
+ - ENV variables:
+ - CM_TVM_USE_CUDA: `yes`
+ * `_openmp`
+ - ENV variables:
+ - CM_TVM_USE_OPENMP: `yes`
+
+
+
+
+ * Group "**installation-type**"
+
+
+ * **`_llvm`** (default)
+ - ENV variables:
+ - CM_TVM_USE_LLVM: `yes`
+ * `_pip-install`
+ - ENV variables:
+ - CM_TVM_PIP_INSTALL: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_llvm`
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT: `main`
+ * CM_GIT_URL: `https://github.com/apache/tvm`
+ * CM_TVM_PIP_INSTALL: `no`
+
+
+#### Versions
+* `main`
+* `v0.10.0`
+* `v0.7.0`
+* `v0.8.0`
+* `v0.9.0`
+
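+For example (a sketch; the combination is illustrative and assumes these variations compose), a CUDA-enabled LLVM build of a tagged release can be requested as:
+
+```bash
+cm run script --tags=get,tvm,get-tvm,_cuda,_llvm --version=v0.10.0
+```
+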
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get tvm get-tvm [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md
new file mode 100644
index 0000000000..dd8814fe13
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md
@@ -0,0 +1,12 @@
+* [get-google-saxml](get-google-saxml/index.md)
+* [get-onnxruntime-prebuilt](get-onnxruntime-prebuilt/index.md)
+* [get-qaic-apps-sdk](get-qaic-apps-sdk/index.md)
+* [get-qaic-platform-sdk](get-qaic-platform-sdk/index.md)
+* [get-qaic-software-kit](get-qaic-software-kit/index.md)
+* [get-rocm](get-rocm/index.md)
+* [get-tvm](get-tvm/index.md)
+* [install-qaic-compute-sdk-from-src](install-qaic-compute-sdk-from-src/index.md)
+* [install-rocm](install-rocm/index.md)
+* [install-tensorflow-for-c](install-tensorflow-for-c/index.md)
+* [install-tensorflow-from-src](install-tensorflow-from-src/index.md)
+* [install-tflite-from-src](install-tflite-from-src/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md
new file mode 100644
index 0000000000..0f04dc1490
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md
@@ -0,0 +1,136 @@
+# install-qaic-compute-sdk-from-src
+Automatically generated README for this automation recipe: **install-qaic-compute-sdk-from-src**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-qaic-compute-sdk-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+
+
+
+
+ * Group "**installation-mode**"
+
+
+ * `_debug`
+ - ENV variables:
+ - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `debug`
+ * **`_release`** (default)
+ - ENV variables:
+ - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `release`
+ * `_release-assert`
+ - ENV variables:
+ - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `release-assert`
+
+
+
+
+ * Group "**repo-source**"
+
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.quic`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc`
+
+
+
+
+ ##### Default variations
+
+ `_release,_repo.quic`
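+
+    For example (a sketch), a debug build from the default repository can be requested as:
+
+    ```bash
+    cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk _debug"
+    ```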
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-qaic-compute-sdk-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md
new file mode 100644
index 0000000000..74756c74d2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md
@@ -0,0 +1,91 @@
+# install-rocm
+Automatically generated README for this automation recipe: **install-rocm**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install rocm install-rocm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,rocm,install-rocm
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install rocm install-rocm "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,rocm,install-rocm',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install rocm install-rocm"
+ ```
+___
+
+#### Versions
+Default version: `5.7.1`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run-rhel.sh)
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run-ubuntu.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install rocm install-rocm " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md
new file mode 100644
index 0000000000..0e1a158eaa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md
@@ -0,0 +1,89 @@
+# install-tensorflow-for-c
+Automatically generated README for this automation recipe: **install-tensorflow-for-c**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-for-c/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install tensorflow lib lang-c" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,tensorflow,lib,lang-c
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install tensorflow lib lang-c "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,tensorflow,lib,lang-c',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install tensorflow lib lang-c"
+ ```
+___
+
+#### Versions
+Default version: `2.8.0`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-for-c/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install tensorflow lib lang-c " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md
new file mode 100644
index 0000000000..36610c1407
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md
@@ -0,0 +1,135 @@
+# install-tensorflow-from-src
+Automatically generated README for this automation recipe: **install-tensorflow-from-src**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get install tensorflow lib source from-source from-src src from.src" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get install tensorflow lib source from-source from-src src from.src [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,install,tensorflow,lib,source,from-source,from-src,src,from.src',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get install tensorflow lib source from-source from-src src from.src[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_tflite`
+ - ENV variables:
+ - CM_TFLITE: `on`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_URL: `https://github.com/tensorflow/tensorflow`
+ * CM_GIT_DEPTH: `1`
+ * CM_TFLITE: `off`
+
+
+#### Versions
+Default version: `master`
+
+* `master`
+* `v1.15.0`
+* `v2.0.0`
+* `v2.1.0`
+* `v2.10.0`
+* `v2.11.0`
+* `v2.12.0`
+* `v2.13.0`
+* `v2.14.0`
+* `v2.15.0`
+* `v2.16.1`
+* `v2.2.0`
+* `v2.3.0`
+* `v2.4.0`
+* `v2.5.0`
+* `v2.6.0`
+* `v2.7.0`
+* `v2.8.0`
+* `v2.9.0`
+
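+For example (a sketch; the version is illustrative), a TFLite-enabled build of a tagged release can be requested as:
+
+```bash
+cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src,_tflite --version=v2.15.0
+```
+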
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get install tensorflow lib source from-source from-src src from.src [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md
new file mode 100644
index 0000000000..f86c93efb0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md
@@ -0,0 +1,100 @@
+# install-tflite-from-src
+Automatically generated README for this automation recipe: **install-tflite-from-src**
+
+Category: **[AI/ML frameworks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tflite-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get install tflite-cmake tensorflow-lite-cmake from-src" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,install,tflite-cmake,tensorflow-lite-cmake,from-src
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get install tflite-cmake tensorflow-lite-cmake from-src "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,install,tflite-cmake,tensorflow-lite-cmake,from-src',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get install tflite-cmake tensorflow-lite-cmake from-src"
+ ```
+___
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_DEPTH: `1`
+
+
+#### Versions
+Default version: `master`
+
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tflite-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get install tflite-cmake tensorflow-lite-cmake from-src " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md
new file mode 100644
index 0000000000..0cf4982dee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md
@@ -0,0 +1,101 @@
+# convert-ml-model-huggingface-to-onnx
+Automatically generated README for this automation recipe: **convert-ml-model-huggingface-to-onnx**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-ml-model-huggingface-to-onnx/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "ml-model model huggingface-to-onnx onnx huggingface convert" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'ml-model,model,huggingface-to-onnx,onnx,huggingface,convert',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "ml-model model huggingface-to-onnx onnx huggingface convert[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_model-path.#`
+ - ENV variables:
+ - CM_MODEL_HUGG_PATH: `#`
+
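+    For example (a sketch; the model name is an illustrative Hugging Face path), a model can be selected via the wildcard variation:
+
+    ```bash
+    cmr "ml-model model huggingface-to-onnx onnx huggingface convert _model-path.bert-base-uncased"
+    ```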
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-ml-model-huggingface-to-onnx/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md
new file mode 100644
index 0000000000..ab69223ae1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md
@@ -0,0 +1,80 @@
+# get-bert-squad-vocab
+Automatically generated README for this automation recipe: **get-bert-squad-vocab**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bert-squad-vocab/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get bert squad bert-large bert-squad vocab" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,bert,squad,bert-large,bert-squad,vocab
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get bert squad bert-large bert-squad vocab "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,bert,squad,bert-large,bert-squad,vocab',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get bert squad bert-large bert-squad vocab"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get bert squad bert-large bert-squad vocab " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md
new file mode 100644
index 0000000000..4aa9382d10
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md
@@ -0,0 +1,118 @@
+# get-dlrm
+Automatically generated README for this automation recipe: **get-dlrm**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src dlrm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,dlrm[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src dlrm [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,src,dlrm',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src dlrm[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_full-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: ``
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_DEPTH: `--depth 10`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_URL: `https://github.com/facebookresearch/dlrm.git`
+
+
+#### Versions
+Default version: `main`
+
+* `main`
+
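+For example (a sketch), the full git history can be kept by selecting the `_full-history` variation:
+
+```bash
+cmr "get src dlrm _full-history"
+```
+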
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get src dlrm [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md
new file mode 100644
index 0000000000..de5fe50eb9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md
@@ -0,0 +1,130 @@
+# get-ml-model-3d-unet-kits19
+Automatically generated README for this automation recipe: **get-ml-model-3d-unet-kits19**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-3d-unet-kits19/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model raw 3d-unet kits19 medical-imaging" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,raw,3d-unet,kits19,medical-imaging[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,ml-model,raw,3d-unet,kits19,medical-imaging',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model raw 3d-unet kits19 medical-imaging[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_weights`
+ - ENV variables:
+ - CM_MODEL_WEIGHTS_FILE: `yes`
+
+
+
+
+ * Group "**framework**"
+
+
+ * **`_onnx`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `onnx`
+ * `_pytorch`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+ * `_tf`
+ - Aliases: `_tensorflow`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `tensorflow`
+
+
+
+
+ * Group "**precision**"
+
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_onnx`
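+
+    For example (a sketch), the PyTorch model can be requested instead of the default ONNX one as:
+
+    ```bash
+    cmr "get ml-model raw 3d-unet kits19 medical-imaging _pytorch"
+    ```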
+
+___
+#### Script output
+```bash
+cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md
new file mode 100644
index 0000000000..dc07850d65
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md
@@ -0,0 +1,119 @@
+# get-ml-model-bert-base-squad
+Automatically generated README for this automation recipe: **get-ml-model-bert-base-squad**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-base-squad/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model raw bert bert-base bert-squad language language-processing" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model raw bert bert-base bert-squad language language-processing[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**framework**"
+
+
+ * `_deepsparse`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `deepsparse`
+ - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+ - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+ - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+ - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+ - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+
+
+
+
+ * Group "**precision**"
+
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_PRECISION: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_QUANTIZED: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_fp32`
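+
+    For example (a sketch), the quantized DeepSparse variant can be requested as:
+
+    ```bash
+    cmr "get ml-model raw bert bert-base bert-squad language language-processing _deepsparse,_int8"
+    ```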
+
+___
+#### Script output
+```bash
+cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md
new file mode 100644
index 0000000000..9ba5778b3f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md
@@ -0,0 +1,188 @@
+# get-ml-model-bert-large-squad
+Automatically generated README for this automation recipe: **get-ml-model-bert-large-squad**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-large-squad/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model raw bert bert-large bert-squad language language-processing" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing',
+                      'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model raw bert bert-large bert-squad language language-processing[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_onnxruntime`
+ * `_tensorflow`
+
+
+
+
+ * Group "**download-source**"
+
+ Click here to expand this section.
+
+ * `_amazon-s3`
+ * `_armi`
+ * `_custom-url.#`
+ - ENV variables:
+ - CM_PACKAGE_URL: `#`
+ * `_github`
+ * `_zenodo`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * `_deepsparse`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `deepsparse`
+ - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+ - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+ - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+ - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+ - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+ * **`_onnx`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `onnx`
+ - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+ - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+ - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+ - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+ - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+ * `_pytorch`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+ - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+ - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+ - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+ - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+ - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+ * `_tf`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `tf`
+ - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+ - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+ - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+ - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+ - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+
+
+
+
+ * Group "**packing**"
+
+ Click here to expand this section.
+
+ * `_packed`
+ - ENV variables:
+ - CM_ML_MODEL_BERT_PACKED: `yes`
+ * **`_unpacked`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_BERT_PACKED: `no`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_PRECISION: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_QUANTIZED: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_onnx,_unpacked`
+
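+For example, variations from different groups can be combined in one call (a sketch; whether a given combination is available is defined by the script metadata):
+
+```bash
+cmr "get ml-model raw bert bert-large bert-squad language language-processing _tf _unpacked _fp32"
+```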
+#### Native script being run
+=== "Linux/macOS"
+ * [run-packed.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-large-squad/run-packed.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md
new file mode 100644
index 0000000000..71138c9a6c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md
@@ -0,0 +1,165 @@
+# get-ml-model-dlrm-terabyte
+Automatically generated README for this automation recipe: **get-ml-model-dlrm-terabyte**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-dlrm-terabyte/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_debug`
+ - ENV variables:
+ - CM_ML_MODEL_DEBUG: `yes`
+
+
+
+
+ * Group "**download-tool**"
+
+ Click here to expand this section.
+
+ * `_rclone`
+ * `_wget`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * `_onnx`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `onnx`
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+ - CM_TMP_MODEL_ADDITIONAL_NAME: `dlrm_terabyte.pytorch`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+
+
+
+
+ * Group "**type**"
+
+ Click here to expand this section.
+
+ * **`_weight_sharded`** (default)
+ - ENV variables:
+ - CM_DLRM_MULTIHOT_MODEL: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_pytorch,_weight_sharded`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--dir=value` → `CM_DOWNLOAD_PATH=value`
+ * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+ * `--to=value` → `CM_DOWNLOAD_PATH=value`
+
+
+
+
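+For example, the download location can be set explicitly through one of the flags above (a sketch; the path is only a placeholder):
+
+```bash
+# --to sets CM_DOWNLOAD_PATH; /data/models/dlrm is a placeholder path
+cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation _pytorch _weight_sharded" --to=/data/models/dlrm
+```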
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-dlrm-terabyte/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md
new file mode 100644
index 0000000000..b95cc653f1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md
@@ -0,0 +1,191 @@
+# get-ml-model-efficientnet-lite
+Automatically generated README for this automation recipe: **get-ml-model-efficientnet-lite**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-efficientnet-lite/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_tflite`
+
+
+
+
+ * Group "**kind**"
+
+ Click here to expand this section.
+
+ * **`_lite0`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite0`
+ * `_lite1`
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite1`
+ * `_lite2`
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite2`
+ * `_lite3`
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite3`
+ * `_lite4`
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite4`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ * `_uint8`
+ - Aliases: `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `uint8`
+
+
+
+
+ * Group "**resolution**"
+
+ Click here to expand this section.
+
+ * **`_resolution-224`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_IMAGE_HEIGHT: `224`
+ - CM_ML_MODEL_IMAGE_WIDTH: `224`
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `224`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.224`
+ * `_resolution-240`
+ - ENV variables:
+ - CM_ML_MODEL_IMAGE_HEIGHT: `240`
+ - CM_ML_MODEL_IMAGE_WIDTH: `240`
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `240`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.240`
+ * `_resolution-260`
+ - ENV variables:
+ - CM_ML_MODEL_IMAGE_HEIGHT: `260`
+ - CM_ML_MODEL_IMAGE_WIDTH: `260`
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `260`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.260`
+ * `_resolution-280`
+ - ENV variables:
+ - CM_ML_MODEL_IMAGE_HEIGHT: `280`
+ - CM_ML_MODEL_IMAGE_WIDTH: `280`
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `280`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.280`
+ * `_resolution-300`
+ - ENV variables:
+ - CM_ML_MODEL_IMAGE_HEIGHT: `300`
+ - CM_ML_MODEL_IMAGE_WIDTH: `300`
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `300`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.300`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_lite0,_resolution-224`
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ * CM_ML_MODEL_PRECISION: `fp32`
+ * CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+
+
+
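+These defaults describe the `_fp32` variation; a different combination can be requested instead (a sketch, assuming the combination is supported by the script metadata):
+
+```bash
+cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification _lite4 _resolution-300 _uint8"
+```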
+___
+#### Script output
+```bash
+cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md
new file mode 100644
index 0000000000..f8ba684b19
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md
@@ -0,0 +1,194 @@
+# get-ml-model-gptj
+Automatically generated README for this automation recipe: **get-ml-model-gptj**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get raw ml-model gptj gpt-j large-language-model" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,gptj,gpt-j,large-language-model',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get raw ml-model gptj gpt-j large-language-model[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_ML_MODEL_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**download-tool**"
+
+ Click here to expand this section.
+
+ * **`_rclone`** (default)
+ - ENV variables:
+ - CM_DOWNLOAD_FILENAME: `checkpoint`
+ - CM_DOWNLOAD_URL: `<<>>`
+ * `_wget`
+ - ENV variables:
+ - CM_DOWNLOAD_URL: `<<>>`
+ - CM_DOWNLOAD_FILENAME: `checkpoint.zip`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+ - CM_ML_STARTING_WEIGHTS_FILENAME: `<<>>`
+ * `_saxml`
+
+
+
+
+ * Group "**model-provider**"
+
+ Click here to expand this section.
+
+ * `_intel`
+ * **`_mlcommons`** (default)
+ * `_nvidia`
+ - ENV variables:
+ - CM_TMP_ML_MODEL_PROVIDER: `nvidia`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * `_fp32`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+ * `_fp8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp8`
+ * `_int4`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `int4`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int4`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
+
+
+
+
+ ##### Default variations
+
+ `_mlcommons,_pytorch,_rclone`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--checkpoint=value` → `GPTJ_CHECKPOINT_PATH=value`
+ * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+ * `--to=value` → `CM_DOWNLOAD_PATH=value`
+
+
+
+
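+For example, an existing checkpoint can be registered instead of downloading it again (a sketch; the path below is only a placeholder):
+
+```bash
+# --checkpoint sets GPTJ_CHECKPOINT_PATH; the path is a placeholder
+cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model,_pytorch,_fp32 --checkpoint=$HOME/models/gpt-j/checkpoint
+```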
+#### Native script being run
+=== "Linux/macOS"
+ * [run-int4-calibration.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-int4-calibration.sh)
+ * [run-intel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-intel.sh)
+ * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-nvidia.sh)
+ * [run-saxml-quantized.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-saxml-quantized.sh)
+ * [run-saxml.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-saxml.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md
new file mode 100644
index 0000000000..5f5ef67fe0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md
@@ -0,0 +1,136 @@
+# get-ml-model-huggingface-zoo
+Automatically generated README for this automation recipe: **get-ml-model-huggingface-zoo**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model huggingface zoo" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,huggingface,zoo[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model huggingface zoo [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,huggingface,zoo',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model huggingface zoo[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_model-stub.#`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `#`
+ * `_onnx-subfolder`
+ - ENV variables:
+ - CM_HF_SUBFOLDER: `onnx`
+ * `_pierreguillou_bert_base_cased_squad_v1.1_portuguese`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `pierreguillou/bert-base-cased-squad-v1.1-portuguese`
+ * `_prune`
+ - ENV variables:
+ - CM_MODEL_TASK: `prune`
+
+
+
+
+ * Group "**download-type**"
+
+ Click here to expand this section.
+
+ * `_clone-repo`
+ - ENV variables:
+ - CM_GIT_CLONE_REPO: `yes`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+ * `--env_key=value` → `CM_MODEL_ZOO_ENV_KEY=value`
+ * `--full_subfolder=value` → `CM_HF_FULL_SUBFOLDER=value`
+ * `--model_filename=value` → `CM_MODEL_ZOO_FILENAME=value`
+ * `--revision=value` → `CM_HF_REVISION=value`
+ * `--subfolder=value` → `CM_HF_SUBFOLDER=value`
+
+
+
+
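+For example, the predefined variation above can be used directly, while any other Hugging Face model can be selected via the generic `_model-stub.#` variation:
+
+```bash
+cmr "get ml-model huggingface zoo _pierreguillou_bert_base_cased_squad_v1.1_portuguese"
+```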
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/run.bat)
+___
+#### Script output
+```bash
+cmr "get ml-model huggingface zoo [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md
new file mode 100644
index 0000000000..fe9e5136a4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md
@@ -0,0 +1,161 @@
+# get-ml-model-llama2
+Automatically generated README for this automation recipe: **get-ml-model-llama2**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-llama2/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get raw ml-model language-processing llama2 llama2-70b text-summarization[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_ML_MODEL_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+
+
+
+
+ * Group "**huggingface-stub**"
+
+ Click here to expand this section.
+
+ * **`_meta-llama/Llama-2-70b-chat-hf`** (default)
+ - ENV variables:
+ - CM_GIT_CHECKOUT_FOLDER: `Llama-2-70b-chat-hf`
+ - CM_MODEL_ZOO_ENV_KEY: `LLAMA2`
+ * `_meta-llama/Llama-2-7b-chat-hf`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_FOLDER: `Llama-2-7b-chat-hf`
+ - CM_MODEL_ZOO_ENV_KEY: `LLAMA2`
+ * `_stub.#`
+ - ENV variables:
+ - CM_MODEL_ZOO_ENV_KEY: `LLAMA2`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_meta-llama/Llama-2-70b-chat-hf,_pytorch`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--checkpoint=value` → `LLAMA2_CHECKPOINT_PATH=value`
+
+
+
+
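+For example, a locally available copy of the weights can be registered via this flag (a sketch; the path is only a placeholder):
+
+```bash
+# --checkpoint sets LLAMA2_CHECKPOINT_PATH; the path is a placeholder
+cm run script --tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization,_meta-llama/Llama-2-7b-chat-hf --checkpoint=$HOME/models/Llama-2-7b-chat-hf
+```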
+___
+#### Script output
+```bash
+cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md
new file mode 100644
index 0000000000..e34f128e86
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md
@@ -0,0 +1,288 @@
+# get-ml-model-mobilenet
+Automatically generated README for this automation recipe: **get-ml-model-mobilenet**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-mobilenet/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-mobilenet/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model mobilenet raw ml-model-mobilenet image-classification[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_tflite`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * `_onnx`
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
+ - CM_ML_MODEL_FRAMEWORK: `onnx`
+ * **`_tf`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
+ - CM_ML_MODEL_NORMALIZE_DATA: `yes`
+ - CM_ML_MODEL_SUBTRACT_MEANS: `no`
+ - CM_ML_MODEL_INPUT_LAYER_NAME: `input`
+
+
+
+
+ * Group "**kind**"
+
+ Click here to expand this section.
+
+ * `_large`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_KIND: `large`
+ * `_large-minimalistic`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_KIND: `large-minimalistic`
+ * `_small`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_KIND: `small`
+ * `_small-minimalistic`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_KIND: `small-minimalistic`
+
+
+
+
+ * Group "**multiplier**"
+
+ Click here to expand this section.
+
+ * `_multiplier-0.25`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.25`
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `25`
+ * `_multiplier-0.35`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.35`
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `35`
+ * `_multiplier-0.5`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.5`
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `50`
+ * `_multiplier-0.75`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.75`
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `75`
+ * `_multiplier-1.0`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER: `1.0`
+ - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `100`
+
+
+
+
+ * Group "**opset-version**"
+
+ Click here to expand this section.
+
+ * `_opset-11`
+ - ENV variables:
+ - CM_ML_MODEL_ONNX_OPSET: `11`
+ * `_opset-8`
+ - ENV variables:
+ - CM_ML_MODEL_ONNX_OPSET: `8`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_MOBILENET_PRECISION: `float`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_MOBILENET_PRECISION: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `uint8`
+ - CM_ML_MODEL_MOBILENET_PRECISION: `uint8`
+
+
+
+
+ * Group "**resolution**"
+
+ Click here to expand this section.
+
+ * `_resolution-128`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `128`
+ - CM_ML_MODEL_IMAGE_HEIGHT: `128`
+ - CM_ML_MODEL_IMAGE_WIDTH: `128`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.128`
+ * `_resolution-160`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `160`
+ - CM_ML_MODEL_IMAGE_HEIGHT: `160`
+ - CM_ML_MODEL_IMAGE_WIDTH: `160`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.160`
+ * `_resolution-192`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `192`
+ - CM_ML_MODEL_IMAGE_HEIGHT: `192`
+ - CM_ML_MODEL_IMAGE_WIDTH: `192`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.192`
+ * `_resolution-224`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_RESOLUTION: `224`
+ - CM_ML_MODEL_IMAGE_HEIGHT: `224`
+ - CM_ML_MODEL_IMAGE_WIDTH: `224`
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.224`
+
+
+
+
+ * Group "**source**"
+
+ Click here to expand this section.
+
+ * `_from.google`
+ - ENV variables:
+ - CM_DOWNLOAD_SOURCE: `google`
+ * `_from.zenodo`
+ - ENV variables:
+ - CM_DOWNLOAD_SOURCE: `zenodo`
+
+
+
+
+ * Group "**version**"
+
+ Click here to expand this section.
+
+ * `_v1`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_VERSION: `1`
+ - CM_ML_MODEL_FULL_NAME: `mobilenet-v1-precision_<<>>-<<>>-<<>>`
+ * `_v2`
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_VERSION: `2`
+ - CM_ML_MODEL_VER: `2`
+ - CM_ML_MODEL_FULL_NAME: `mobilenet-v2-precision_<<>>-<<>>-<<>>`
+ * **`_v3`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_MOBILENET_VERSION: `3`
+ - CM_ML_MODEL_VER: `3`
+ - CM_ML_MODEL_FULL_NAME: `mobilenet-v3-precision_<<>>-<<>>-<<>>`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_tf,_v3`
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_ML_MODEL: `mobilenet`
+ * CM_ML_MODEL_DATASET: `imagenet2012-val`
+ * CM_ML_MODEL_RETRAINING: `no`
+ * CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `no`
+ * CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ * CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ * CM_ML_MODEL_MOBILENET_NAME_SUFFIX: ``
+
+
+
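+For example, a different version, multiplier and resolution can be selected per run (a sketch, assuming the combination is supported by the script metadata):
+
+```bash
+cm run script --tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification,_v2,_multiplier-1.0,_resolution-224 --quiet
+```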
+___
+#### Script output
+```bash
+cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md
new file mode 100644
index 0000000000..ddbfc6af02
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md
@@ -0,0 +1,271 @@
+# get-ml-model-neuralmagic-zoo
+Automatically generated README for this automation recipe: **get-ml-model-neuralmagic-zoo**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_bert-base-pruned90-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none`
+ - CM_ML_MODEL_FULL_NAME: `bert-base-pruned90-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_bert-base-pruned95_obs_quant-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none`
+ - CM_ML_MODEL_FULL_NAME: `bert-base-pruned95_obs_quant-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `yes`
+ * `_bert-base_cased-pruned90-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none`
+ - CM_ML_MODEL_FULL_NAME: `bert-base_cased-pruned90-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-cased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_bert-large-base-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none`
+ - CM_ML_MODEL_FULL_NAME: `bert-large-base-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_bert-large-pruned80_quant-none-vnni`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni`
+ - CM_ML_MODEL_FULL_NAME: `bert-large-pruned80_quant-none-vnni-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_mobilebert-14layer_pruned50-none-vnni`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni`
+ - CM_ML_MODEL_FULL_NAME: `mobilebert-14layer_pruned50-none-vnni-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_mobilebert-14layer_pruned50_quant-none-vnni`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni`
+ - CM_ML_MODEL_FULL_NAME: `mobilebert-14layer_pruned50_quant-none-vnni-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `yes`
+ * `_mobilebert-base_quant-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none`
+ - CM_ML_MODEL_FULL_NAME: `mobilebert-base_quant-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `yes`
+ * `_mobilebert-none-base-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none`
+ - CM_ML_MODEL_FULL_NAME: `mobilebert-none-base-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_model-stub.#`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `#`
+ * `_obert-base-pruned90-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none`
+ - CM_ML_MODEL_FULL_NAME: `obert-base-pruned90-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_obert-large-base-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none`
+ - CM_ML_MODEL_FULL_NAME: `obert-large-base-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_obert-large-pruned95-none-vnni`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni`
+ - CM_ML_MODEL_FULL_NAME: `obert-large-pruned95-none-vnni-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_obert-large-pruned95_quant-none-vnni`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni`
+ - CM_ML_MODEL_FULL_NAME: `obert-large-pruned95_quant-none-vnni-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `yes`
+ * `_obert-large-pruned97-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none`
+ - CM_ML_MODEL_FULL_NAME: `obert-large-pruned97-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_obert-large-pruned97-quant-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none`
+ - CM_ML_MODEL_FULL_NAME: `obert-large-pruned97-quant-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_oberta-base-pruned90-quant-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none`
+ - CM_ML_MODEL_FULL_NAME: `oberta-base-pruned90-quant-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/roberta-base`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `no`
+ * `_roberta-base-pruned85-quant-none`
+ - Aliases: `_model-stub.zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none`
+ - ENV variables:
+ - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none`
+ - CM_ML_MODEL_FULL_NAME: `roberta-base-pruned85-quant-none-bert-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/roberta-base`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
+ - CM_ML_MODEL_RETRAINING: `no`
+
+
+
+
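+Any other SparseZoo model can be selected through the generic `_model-stub.#` variation; for example, with one of the stubs listed above:
+
+```bash
+cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic _model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none"
+```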
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/run.bat)
+___
+#### Script output
+```bash
+cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md
new file mode 100644
index 0000000000..0f2ff13a7e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md
@@ -0,0 +1,228 @@
+# get-ml-model-resnet50
+Automatically generated README for this automation recipe: **get-ml-model-resnet50**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,raw,ml-model,resnet50,ml-model-resnet50,image-classification[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get raw ml-model resnet50 ml-model-resnet50 image-classification[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_ML_MODEL_BATCH_SIZE: `#`
+ * `_batch_size.1`
+ - ENV variables:
+ - CM_ML_MODEL_BATCH_SIZE: `1`
+ * `_fix-input-shape`
+ * `_from-tf`
+ * `_huggingface_default`
+ - ENV variables:
+ - CM_PACKAGE_URL: `https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * `_ncnn`
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `ncnn`
+ * **`_onnx`** (default)
+ - Aliases: `_onnxruntime`
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
+ - CM_ML_MODEL_FRAMEWORK: `onnx`
+ - CM_ML_MODEL_INPUT_LAYERS: `input_tensor:0`
+ - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor:0`
+ - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)`
+ - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor:0`
+ - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor:0`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
+ - CM_ML_MODEL_VER: `1.5`
+ * `_pytorch`
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+ - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `?`
+ - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor:0`
+ - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]`
+ - CM_ML_MODEL_OUTPUT_LAYERS: `output`
+ - CM_ML_MODEL_OUTPUT_LAYER_NAME: `?`
+ - CM_ML_STARTING_WEIGHTS_FILENAME: `<<>>`
+ * `_tensorflow`
+ - Aliases: `_tf`
+ - ENV variables:
+ - CM_ML_MODEL_ACCURACY: `76.456`
+ - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
+ - CM_ML_MODEL_FRAMEWORK: `tensorflow`
+ - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
+ - CM_ML_MODEL_INPUT_LAYERS: `input_tensor`
+ - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor`
+ - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)`
+ - CM_ML_MODEL_NORMALIZE_DATA: `0`
+ - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor`
+ - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
+ - CM_ML_MODEL_SUBTRACT_MEANS: `YES`
+ - CM_PACKAGE_URL: `https://zenodo.org/record/2535873/files/resnet50_v1.pb`
+ * `_tflite`
+ - ENV variables:
+ - CM_ML_MODEL_ACCURACY: `76.456`
+ - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
+ - CM_ML_MODEL_FRAMEWORK: `tflite`
+ - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
+ - CM_ML_MODEL_INPUT_LAYERS: `input_tensor`
+ - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor`
+ - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)`
+ - CM_ML_MODEL_NORMALIZE_DATA: `0`
+ - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor`
+ - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
+ - CM_ML_MODEL_SUBTRACT_MEANS: `YES`
+
+
+
+
+ * Group "**model-output**"
+
+ Click here to expand this section.
+
+ * **`_argmax`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: `yes`
+ * `_no-argmax`
+ - ENV variables:
+ - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: `no`
+
+
+
+
+ * Group "**opset-version**"
+
+ Click here to expand this section.
+
+ * `_opset-11`
+ - ENV variables:
+ - CM_ML_MODEL_ONNX_OPSET: `11`
+ * `_opset-8`
+ - ENV variables:
+ - CM_ML_MODEL_ONNX_OPSET: `8`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
+
+
+
+
+ ##### Default variations
+
+ `_argmax,_fp32,_onnx`
+
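+For example, the TensorFlow model hosted on Zenodo can be fetched instead of the default ONNX one, with the JSON result printed via `-j`:
+
+```bash
+cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification _tensorflow" -j
+```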
+#### Native script being run
+=== "Linux/macOS"
+ * [run-fix-input.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/run-fix-input.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md
new file mode 100644
index 0000000000..aa0894064f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md
@@ -0,0 +1,111 @@
+# get-ml-model-retinanet-nvidia
+Automatically generated README for this automation recipe: **get-ml-model-retinanet-nvidia**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet-nvidia/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model nvidia-retinanet nvidia" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,nvidia-retinanet,nvidia[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model nvidia-retinanet nvidia [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,nvidia-retinanet,nvidia',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model nvidia-retinanet nvidia[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_efficient-nms`
+ - ENV variables:
+ - CM_NVIDIA_EFFICIENT_NMS: `yes`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_TORCH_DEVICE: `cpu`
+
+
+
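+For example, this default can be overridden on the command line (the `cuda` value is an assumption for illustration, not taken from the script metadata):
+
+```bash
+# 'cuda' is an assumed value; only 'cpu' is documented as the default
+cm run script --tags=get,ml-model,nvidia-retinanet,nvidia --env.CM_TORCH_DEVICE=cuda
+```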
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet-nvidia/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get ml-model nvidia-retinanet nvidia [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md
new file mode 100644
index 0000000000..db0a15981c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md
@@ -0,0 +1,140 @@
+# get-ml-model-retinanet
+Automatically generated README for this automation recipe: **get-ml-model-retinanet**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+#### Print CM help from the command line
+
+```bash
+cmr "get ml-model raw resnext50 retinanet object-detection" --help
+```
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,raw,resnext50,retinanet,object-detection[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model raw resnext50 retinanet object-detection [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,raw,resnext50,retinanet,object-detection',
+                      'out': 'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model raw resnext50 retinanet object-detection[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_no-nms`
+ - ENV variables:
+ - CM_TMP_ML_MODEL_RETINANET_NO_NMS: `yes`
+ - CM_ML_MODEL_RETINANET_NO_NMS: `yes`
+ - CM_QAIC_PRINT_NODE_PRECISION_INFO: `yes`
+ * `_weights`
+ - ENV variables:
+ - CM_MODEL_WEIGHTS_FILE: `yes`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * **`_onnx`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
+ - CM_ML_MODEL_FRAMEWORK: `onnx`
+ * `_pytorch`
+ - ENV variables:
+ - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_onnx`
+
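+ For example, a hypothetical invocation combining the `_pytorch` framework variation with `_no-nms` from the lists above:
+
+ ```bash
+ cmr "get ml-model raw resnext50 retinanet object-detection _pytorch,_no-nms"
+ ```
+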
+#### Native script being run
+=== "Linux/macOS"
+ * [run-no-nms.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/run-no-nms.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get ml-model raw resnext50 retinanet object-detection [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md
new file mode 100644
index 0000000000..82a0da040d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md
@@ -0,0 +1,133 @@
+# get-ml-model-rnnt
+Automatically generated README for this automation recipe: **get-ml-model-rnnt**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-rnnt/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model rnnt raw librispeech speech-recognition" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,rnnt,raw,librispeech,speech-recognition[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model rnnt raw librispeech speech-recognition [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,ml-model,rnnt,raw,librispeech,speech-recognition',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model rnnt raw librispeech speech-recognition[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_weights`
+ - ENV variables:
+ - CM_MODEL_WEIGHTS_FILE: `yes`
+
+
+
+
+ * Group "**download-src**"
+
+ Click here to expand this section.
+
+ * `_amazon-s3`
+ * **`_zenodo`** (default)
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_pytorch,_zenodo`
+
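+ For instance, a plausible call that switches the download source from the default Zenodo to Amazon S3, based on the group names above:
+
+ ```bash
+ cmr "get ml-model rnnt raw librispeech speech-recognition _amazon-s3"
+ ```
+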
+___
+#### Script output
+```bash
+cmr "get ml-model rnnt raw librispeech speech-recognition [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md
new file mode 100644
index 0000000000..e488ff1054
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md
@@ -0,0 +1,177 @@
+# get-ml-model-stable-diffusion
+Automatically generated README for this automation recipe: **get-ml-model-stable-diffusion**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-stable-diffusion/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get raw ml-model stable-diffusion sdxl text-to-image" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,raw,ml-model,stable-diffusion,sdxl,text-to-image[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,raw,ml-model,stable-diffusion,sdxl,text-to-image',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get raw ml-model stable-diffusion sdxl text-to-image[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_ML_MODEL_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**download-source**"
+
+ Click here to expand this section.
+
+ * `_huggingface`
+ * **`_mlcommons`** (default)
+
+
+
+
+ * Group "**download-tool**"
+
+ Click here to expand this section.
+
+ * `_git`
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `git`
+ * `_rclone`
+ - ENV variables:
+ - CM_RCLONE_CONFIG_CMD: `rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com`
+ - CM_DOWNLOAD_TOOL: `rclone`
+ * `_wget`
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `wget`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_FRAMEWORK: `pytorch`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * `_fp16`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp16`
+ - CM_ML_MODEL_PRECISION: `fp16`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp16`
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+ * `_int8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_mlcommons,_pytorch`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--checkpoint=value` → `SDXL_CHECKPOINT_PATH=value`
+ * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+ * `--to=value` → `CM_DOWNLOAD_PATH=value`
+
+
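+ As an illustration (the download path is hypothetical), the fp16 model could be fetched via rclone to a custom location:
+
+ ```bash
+ cmr "get raw ml-model stable-diffusion sdxl text-to-image _fp16,_rclone" --download_path=$HOME/models/sdxl
+ ```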
+
+
+___
+#### Script output
+```bash
+cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md
new file mode 100644
index 0000000000..cb4084c9f8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md
@@ -0,0 +1,153 @@
+# get-ml-model-tiny-resnet
+Automatically generated README for this automation recipe: **get-ml-model-tiny-resnet**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-tiny-resnet/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_ML_MODEL_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**framework**"
+
+ Click here to expand this section.
+
+ * `_onnx`
+ - ENV variables:
+ - CM_TMP_ML_MODEL_TF2ONNX: `yes`
+ * **`_tflite`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_ACCURACY: `85`
+ - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
+ - CM_ML_MODEL_FRAMEWORK: `tflite`
+ - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: ``
+ - CM_ML_MODEL_INPUT_LAYERS: ``
+ - CM_ML_MODEL_INPUT_LAYER_NAME: ``
+ - CM_ML_MODEL_INPUT_SHAPES: ``
+ - CM_ML_MODEL_NORMALIZE_DATA: `0`
+ - CM_ML_MODEL_OUTPUT_LAYERS: ``
+ - CM_ML_MODEL_OUTPUT_LAYER_NAME: ``
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
+ - CM_ML_MODEL_SUBTRACT_MEANS: `YES`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * `_fp32`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+ - CM_ML_MODEL_PRECISION: `fp32`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+ * **`_int8`** (default)
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
+ - CM_ML_MODEL_PRECISION: `int8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
+ - CM_ML_MODEL_PRECISION: `uint8`
+ - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
+
+
+
+
+ ##### Default variations
+
+ `_int8,_tflite`
+
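+ For example, a hypothetical run that requests the ONNX export at fp32 precision instead of the defaults:
+
+ ```bash
+ cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification _onnx,_fp32"
+ ```
+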
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-tiny-resnet/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md
new file mode 100644
index 0000000000..27bce37658
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md
@@ -0,0 +1,95 @@
+# get-ml-model-using-imagenet-from-model-zoo
+Automatically generated README for this automation recipe: **get-ml-model-using-imagenet-from-model-zoo**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model model-zoo zoo imagenet image-classification" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model,model-zoo,zoo,imagenet,image-classification[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model model-zoo zoo imagenet image-classification [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,ml-model,model-zoo,zoo,imagenet,image-classification',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model model-zoo zoo imagenet image-classification[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**model-source**"
+
+ Click here to expand this section.
+
+ * `_model.#`
+ * `_model.resnet101-pytorch-base`
+ * `_model.resnet50-pruned95-uniform-quant`
+
+
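+ For example, selecting one of the stubs listed above:
+
+ ```bash
+ cmr "get ml-model model-zoo zoo imagenet image-classification _model.resnet50-pruned95-uniform-quant"
+ ```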
+
+
+___
+#### Script output
+```bash
+cmr "get ml-model model-zoo zoo imagenet image-classification [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md
new file mode 100644
index 0000000000..4cff76283d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md
@@ -0,0 +1,188 @@
+# get-tvm-model
+Automatically generated README for this automation recipe: **get-tvm-model**
+
+Category: **[AI/ML models](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ml-model-tvm tvm-model" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ml-model-tvm,tvm-model[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ml-model-tvm tvm-model [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,ml-model-tvm,tvm-model',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ml-model-tvm tvm-model[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_tune-model`
+ - ENV variables:
+ - CM_TUNE_TVM_MODEL: `yes`
+
+
+
+
+ * Group "**batchsize**"
+
+ Click here to expand this section.
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_ML_MODEL_MAX_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**frontend**"
+
+ Click here to expand this section.
+
+ * **`_onnx`** (default)
+ - ENV variables:
+ - CM_TVM_FRONTEND_FRAMEWORK: `onnx`
+ * `_pytorch`
+ - Aliases: `_torch`
+ - ENV variables:
+ - CM_TVM_FRONTEND_FRAMEWORK: `pytorch`
+ * `_tensorflow`
+ - Aliases: `_tf`
+ - ENV variables:
+ - CM_TVM_FRONTEND_FRAMEWORK: `tensorflow`
+ * `_tflite`
+ - ENV variables:
+ - CM_TVM_FRONTEND_FRAMEWORK: `tflite`
+
+
+
+
+ * Group "**model**"
+
+ Click here to expand this section.
+
+ * `_model.#`
+ - ENV variables:
+ - CM_ML_MODEL: `#`
+
+
+
+
+ * Group "**precision**"
+
+ Click here to expand this section.
+
+ * **`_fp32`** (default)
+ * `_int8`
+ * `_uint8`
+
+
+
+
+ * Group "**runtime**"
+
+ Click here to expand this section.
+
+ * `_graph_executor`
+ - ENV variables:
+ - CM_TVM_USE_VM: `no`
+ * **`_virtual_machine`** (default)
+ - ENV variables:
+ - CM_TVM_USE_VM: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_fp32,_onnx,_virtual_machine`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_ML_MODEL_MAX_BATCH_SIZE: `1`
+ * CM_TUNE_TVM_MODEL: `no`
+ * CM_TVM_USE_VM: `yes`
+ * CM_TVM_FRONTEND_FRAMEWORK: `onnx`
+
+
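+ For example, a sketch of combining variations from the lists above with an env override (the batch size value is illustrative):
+
+ ```bash
+ cmr "get ml-model-tvm tvm-model _pytorch,_tune-model" --env.CM_ML_MODEL_MAX_BATCH_SIZE=8
+ ```
+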
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get ml-model-tvm tvm-model [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md
new file mode 100644
index 0000000000..c3c12890ad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md
@@ -0,0 +1,21 @@
+* [convert-ml-model-huggingface-to-onnx](convert-ml-model-huggingface-to-onnx/index.md)
+* [get-bert-squad-vocab](get-bert-squad-vocab/index.md)
+* [get-dlrm](get-dlrm/index.md)
+* [get-ml-model-3d-unet-kits19](get-ml-model-3d-unet-kits19/index.md)
+* [get-ml-model-bert-base-squad](get-ml-model-bert-base-squad/index.md)
+* [get-ml-model-bert-large-squad](get-ml-model-bert-large-squad/index.md)
+* [get-ml-model-dlrm-terabyte](get-ml-model-dlrm-terabyte/index.md)
+* [get-ml-model-efficientnet-lite](get-ml-model-efficientnet-lite/index.md)
+* [get-ml-model-gptj](get-ml-model-gptj/index.md)
+* [get-ml-model-huggingface-zoo](get-ml-model-huggingface-zoo/index.md)
+* [get-ml-model-llama2](get-ml-model-llama2/index.md)
+* [get-ml-model-mobilenet](get-ml-model-mobilenet/index.md)
+* [get-ml-model-neuralmagic-zoo](get-ml-model-neuralmagic-zoo/index.md)
+* [get-ml-model-resnet50](get-ml-model-resnet50/index.md)
+* [get-ml-model-retinanet](get-ml-model-retinanet/index.md)
+* [get-ml-model-retinanet-nvidia](get-ml-model-retinanet-nvidia/index.md)
+* [get-ml-model-rnnt](get-ml-model-rnnt/index.md)
+* [get-ml-model-stable-diffusion](get-ml-model-stable-diffusion/index.md)
+* [get-ml-model-tiny-resnet](get-ml-model-tiny-resnet/index.md)
+* [get-ml-model-using-imagenet-from-model-zoo](get-ml-model-using-imagenet-from-model-zoo/index.md)
+* [get-tvm-model](get-tvm-model/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md
new file mode 100644
index 0000000000..9c61d11241
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md
@@ -0,0 +1,186 @@
+# calibrate-model-for.qaic
+Automatically generated README for this automation recipe: **calibrate-model-for.qaic**
+
+Category: **[AI/ML optimization](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/calibrate-model-for.qaic/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "qaic calibrate profile qaic-profile qaic-calibrate" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'qaic,calibrate,profile,qaic-profile,qaic-calibrate',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "qaic calibrate profile qaic-profile qaic-calibrate[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_first.#`
+
+
+
+
+ * Group "**batch-size**"
+
+ Click here to expand this section.
+
+ * `_bs.#`
+ - ENV variables:
+ - CM_QAIC_MODEL_BATCH_SIZE: `#`
+ - CM_CREATE_INPUT_BATCH: `yes`
+ * `_bs.1`
+ - ENV variables:
+ - CM_QAIC_MODEL_BATCH_SIZE: `1`
+ - CM_CREATE_INPUT_BATCH: `yes`
+
+
+
+
+ * Group "**calib-dataset-filter-size**"
+
+ Click here to expand this section.
+
+ * `_filter-size.#`
+
+
+
+
+ * Group "**calibration-option**"
+
+ Click here to expand this section.
+
+ * `_mlperf.option1`
+ * `_mlperf.option2`
+
+
+
+
+ * Group "**model**"
+
+ Click here to expand this section.
+
+ * `_bert-99`
+ - ENV variables:
+ - CM_CALIBRATE_SQUAD: `yes`
+ - CM_QAIC_COMPILER_ARGS: ``
+ - CM_QAIC_COMPILER_PARAMS: `-onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> -input-list-file=<<>> -num-histogram-bins=512 -profiling-threads=<<>>`
+ - CM_QAIC_MODEL_TO_CONVERT: `calibrate_bert_mlperf`
+ * `_resnet50`
+ - ENV variables:
+ - CM_QAIC_MODEL_NAME: `resnet50`
+ - CM_CALIBRATE_IMAGENET: `yes`
+ - CM_QAIC_COMPILER_ARGS: ``
+ - CM_QAIC_COMPILER_PARAMS: `-output-node-name=ArgMax -profiling-threads=<<>>`
+ - CM_QAIC_OUTPUT_NODE_NAME: `-output-node-name=ArgMax`
+ - CM_QAIC_MODEL_TO_CONVERT: `calibrate_resnet50_tf`
+ * `_retinanet`
+ - ENV variables:
+ - CM_QAIC_MODEL_NAME: `retinanet`
+ - CM_CALIBRATE_OPENIMAGES: `yes`
+ - CM_QAIC_COMPILER_ARGS: ``
+ - CM_QAIC_COMPILER_PARAMS: `-enable-channelwise -profiling-threads=<<>> -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>`
+ - CM_QAIC_MODEL_TO_CONVERT: `calibrate_retinanet_no_nms_mlperf`
+
+
+
+
+ * Group "**model-framework**"
+
+ Click here to expand this section.
+
+ * `_tf`
+
+
+
+
+ * Group "**seq-length**"
+
+ Click here to expand this section.
+
+ * `_seq.#`
+ - ENV variables:
+ - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: `#`
+ * `_seq.384`
+ - ENV variables:
+ - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: `384`
+
+
+
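+ As an illustrative (unverified) combination of the groups above, calibrating ResNet50 with batch size 1 might look like:
+
+ ```bash
+ cmr "qaic calibrate profile qaic-profile qaic-calibrate _resnet50,_bs.1"
+ ```
+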
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/calibrate-model-for.qaic/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md
new file mode 100644
index 0000000000..4bc9d3db74
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md
@@ -0,0 +1,216 @@
+# compile-model-for.qaic
+Automatically generated README for this automation recipe: **compile-model-for.qaic**
+
+Category: **[AI/ML optimization](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-model-for.qaic/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "qaic compile model model-compile qaic-compile" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=qaic,compile,model,model-compile,qaic-compile[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'qaic,compile,model,model-compile,qaic-compile',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "qaic compile model model-compile qaic-compile[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_bert-99`
+ - ENV variables:
+ - CM_COMPILE_BERT: `on`
+ - CM_QAIC_MODEL_TO_CONVERT: `calibrate_bert_mlperf`
+ - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -multicast-weights -combine-inputs=false -combine-outputs=false`
+ - CM_QAIC_MODEL_COMPILER_ARGS: ``
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_COMPILE_BERT: `on`
+ - CM_QAIC_MODEL_TO_CONVERT: `bert_mlperf`
+ - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -convert-to-fp16 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -combine-inputs=false -combine-outputs=false`
+ - CM_QAIC_MODEL_COMPILER_ARGS: ``
+ * `_resnet50`
+ - ENV variables:
+ - CM_COMPILE_RESNET: `on`
+ - CM_QAIC_MODEL_TO_CONVERT: `compile_resnet50_tf`
+ - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1`
+ * `_retinanet`
+ - ENV variables:
+ - CM_COMPILE_RETINANET: `on`
+ - CM_QAIC_MODEL_TO_CONVERT: `calibrate_retinanet_no_nms_mlperf`
+ - CM_QAIC_MODEL_COMPILER_ARGS: `-aic-enable-depth-first`
+ - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -compile-only -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric -quantization-calibration=None`
+
+
+
+
+ * Group "**batch-size**"
+
+ Click here to expand this section.
+
+ * `_bs.#`
+ - ENV variables:
+ - CM_QAIC_MODEL_BATCH_SIZE: `#`
+ * `_bs.1`
+ - ENV variables:
+ - CM_QAIC_MODEL_BATCH_SIZE: `1`
+
+
+
+
+ * Group "**calib-dataset-filter-size**"
+
+ Click here to expand this section.
+
+ * `_filter-size.#`
+
+
+
+
+ * Group "**mlperf-scenario**"
+
+ Click here to expand this section.
+
+ * `_multistream`
+ * `_offline`
+ * `_server`
+ * **`_singlestream`** (default)
+
+
+
+
+ * Group "**model-framework**"
+
+ Click here to expand this section.
+
+ * `_tf`
+
+
+
+
+ * Group "**nsp**"
+
+ Click here to expand this section.
+
+ * `_nsp.14`
+ * `_nsp.16`
+ * `_nsp.8`
+ * `_nsp.9`
+
+
+
+
+ * Group "**percentile-calibration**"
+
+ Click here to expand this section.
+
+ * `_pc.#`
+ - ENV variables:
+ - CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: `#`
+ - CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: `-quantization-calibration=Percentile -percentile-calibration-value=<<>>`
+
+
+
+
+ * Group "**quantization**"
+
+ Click here to expand this section.
+
+ * `_no-quantized`
+ - ENV variables:
+ - CM_QAIC_MODEL_QUANTIZATION: `no`
+ * **`_quantized`** (default)
+ - ENV variables:
+ - CM_QAIC_MODEL_QUANTIZATION: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_quantized,_singlestream`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--register=value` → `CM_REGISTER_CACHE=value`
+
+
+
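+ For example, a hypothetical compilation of the quantized ResNet50 model for the offline scenario on a 14-NSP device, using variations from the groups above:
+
+ ```bash
+ cmr "qaic compile model model-compile qaic-compile _resnet50,_offline,_nsp.14"
+ ```
+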
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-model-for.qaic/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md
new file mode 100644
index 0000000000..c1a2508513
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md
@@ -0,0 +1,3 @@
+* [calibrate-model-for.qaic](calibrate-model-for.qaic/index.md)
+* [compile-model-for.qaic](compile-model-for.qaic/index.md)
+* [prune-bert-models](prune-bert-models/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md
new file mode 100644
index 0000000000..bf9821a7ef
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md
@@ -0,0 +1,132 @@
+# prune-bert-models
+Automatically generated README for this automation recipe: **prune-bert-models**
+
+Category: **[AI/ML optimization](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "prune bert-models bert-prune prune-bert-models" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=prune,bert-models,bert-prune,prune-bert-models[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'prune,bert-models,bert-prune,prune-bert-models',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "prune bert-models bert-prune prune-bert-models[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_model.#`
+ - ENV variables:
+ - CM_BERT_PRUNE_MODEL_NAME: `#`
+ - CM_MODEL_ZOO_STUB: `#`
+ * `_path.#`
+ - ENV variables:
+ - CM_BERT_PRUNE_CKPT_PATH: `#`
+ * `_task.#`
+ - ENV variables:
+ - CM_BERT_PRUNE_TASK: `#`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--constraint=value` → `CM_BERT_PRUNE_CONSTRAINT=value`
+ * `--output_dir=value` → `CM_BERT_PRUNE_OUTPUT_DIR=value`
+
+
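+ For instance (the output directory is hypothetical), pruning the default model with a tighter constraint:
+
+ ```bash
+ cmr "prune bert-models bert-prune prune-bert-models _model.bert-large-uncased" --constraint=0.4 --output_dir=$HOME/pruned-bert
+ ```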
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_BERT_PRUNE_TASK: `squad`
+ * CM_BERT_PRUNE_MODEL_NAME: `bert-large-uncased`
+ * CM_MODEL_ZOO_STUB: `bert-large-uncased`
+ * CM_BERT_PRUNE_CONSTRAINT: `0.5`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md
new file mode 100644
index 0000000000..6c62118e81
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md
@@ -0,0 +1,95 @@
+# get-cache-dir
+Automatically generated README for this automation recipe: **get-cache-dir**
+
+Category: **[CM Interface](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cache-dir/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cache dir directory" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cache,dir,directory[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cache dir directory [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,cache,dir,directory',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cache dir directory[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_name.#`
+ - ENV variables:
+ - CM_CACHE_DIR_NAME: `#`
+
+
+
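+ For example, requesting a named cache directory (the name here is arbitrary):
+
+ ```bash
+ cmr "get cache dir directory _name.my-experiments" -j
+ ```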
+
+___
+#### Script output
+```bash
+cmr "get cache dir directory [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md
new file mode 100644
index 0000000000..32d34042d1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md
@@ -0,0 +1 @@
+* [get-cache-dir](get-cache-dir/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md
new file mode 100644
index 0000000000..178195e07a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md
@@ -0,0 +1,92 @@
+# create-custom-cache-entry
+Automatically generated README for this automation recipe: **create-custom-cache-entry**
+
+Category: **[CM automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/create-custom-cache-entry/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "create custom cache entry" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=create,custom,cache,entry [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "create custom cache entry " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'create,custom,cache,entry',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "create custom cache entry" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--env_key=value` → `CM_CUSTOM_CACHE_ENTRY_ENV_KEY=value`
+ * `--env_key2=value` → `CM_CUSTOM_CACHE_ENTRY_ENV_KEY2=value`
+ * `--path=value` → `CM_CUSTOM_CACHE_ENTRY_PATH=value`
+ * `--to=value` → `CM_CUSTOM_CACHE_ENTRY_PATH=value`
+
+
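+ For example (both values are hypothetical), registering an existing directory as a custom cache entry:
+
+ ```bash
+ cm run script --tags=create,custom,cache,entry --env_key=MY_DATA --path=/tmp/my-dataset
+ ```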
+
+
+___
+#### Script output
+```bash
+cmr "create custom cache entry " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md
new file mode 100644
index 0000000000..996533c19b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md
@@ -0,0 +1 @@
+* [create-custom-cache-entry](create-custom-cache-entry/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md
new file mode 100644
index 0000000000..927cf1b1a0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md
@@ -0,0 +1,2 @@
+* [test-debug](test-debug/index.md)
+* [test-mlperf-inference-retinanet](test-mlperf-inference-retinanet/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md
new file mode 100644
index 0000000000..0e848be1a8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md
@@ -0,0 +1,87 @@
+# test-debug
+Automatically generated README for this automation recipe: **test-debug**
+
+Category: **[CM interface prototyping](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test cm-debug" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,cm-debug
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test cm-debug "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'test,cm-debug',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test cm-debug"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/run.bat)
+___
+#### Script output
+```bash
+cmr "test cm-debug " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md
new file mode 100644
index 0000000000..406c3a9ad5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md
@@ -0,0 +1,86 @@
+# test-mlperf-inference-retinanet
+Automatically generated README for this automation recipe: **test-mlperf-inference-retinanet**
+
+Category: **[CM interface prototyping](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test mlperf-inference-win retinanet windows" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,mlperf-inference-win,retinanet,windows
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test mlperf-inference-win retinanet windows "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'test,mlperf-inference-win,retinanet,windows',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test mlperf-inference-win retinanet windows"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/run.bat)
+___
+#### Script output
+```bash
+cmr "test mlperf-inference-win retinanet windows " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md
new file mode 100644
index 0000000000..7fddb5a55d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md
@@ -0,0 +1,86 @@
+# get-cuda-devices
+Automatically generated README for this automation recipe: **get-cuda-devices**
+
+Category: **[CUDA automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cuda-devices" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cuda-devices
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cuda-devices "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,cuda-devices',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cuda-devices"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/run.bat)
+___
+#### Script output
+```bash
+cmr "get cuda-devices " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md
new file mode 100644
index 0000000000..5e789f3e60
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md
@@ -0,0 +1,158 @@
+# get-cuda
+Automatically generated README for this automation recipe: **get-cuda**
+
+Category: **[CUDA automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/README-extra.md)
+
+
+---
+
+### System dependencies
+
+* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit).
+* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download).
+* Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download).
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_cudnn`
+ - ENV variables:
+ - CM_CUDA_NEEDS_CUDNN: `yes`
+ * `_package-manager`
+ - ENV variables:
+ - CM_CUDA_PACKAGE_MANAGER_INSTALL: `yes`
+
+
+
+
+ * Group "**installation-mode**"
+
+ Click here to expand this section.
+
+ * `_lib-only`
+ - ENV variables:
+ - CM_CUDA_FULL_TOOLKIT_INSTALL: `no`
+ - CM_TMP_FILE_TO_CHECK_UNIX: `libcudart.so`
+ - CM_TMP_FILE_TO_CHECK_WINDOWS: `libcudart.dll`
+ * **`_toolkit`** (default)
+ - ENV variables:
+ - CM_CUDA_FULL_TOOLKIT_INSTALL: `yes`
+ - CM_TMP_FILE_TO_CHECK_UNIX: `nvcc`
+ - CM_TMP_FILE_TO_CHECK_WINDOWS: `nvcc.exe`
+
+
+
+
+ ##### Default variations
+
+ `_toolkit`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--cudnn_tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value`
+ * `--cudnn_tar_path=value` → `CM_CUDNN_TAR_FILE_PATH=value`
+
+
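+ For example, a hypothetical run that requests the full toolkit together with cuDNN from a local tar archive (the path is illustrative):
+
+ ```bash
+ cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d _toolkit,_cudnn" --cudnn_tar_file=$HOME/Downloads/cudnn-linux-x86_64.tar.xz
+ ```
+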
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_CUDA_PATH_LIB_CUDNN_EXISTS: `no`
+ * CM_REQUIRE_INSTALL: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/run.bat)
+___
+#### Script output
+```bash
+cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md
new file mode 100644
index 0000000000..76655cd84d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md
@@ -0,0 +1,115 @@
+# get-cudnn
+Automatically generated README for this automation recipe: **get-cudnn**
+
+Category: **[CUDA automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cudnn nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cudnn,nvidia [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cudnn nvidia " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,cudnn,nvidia',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cudnn nvidia" [--input_flags]
+ ```
+___
+
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**input:** Full path to the installed cuDNN library
+ * --**tar_file:** Full path to the cuDNN tar file downloaded from the Nvidia website (https://developer.nvidia.com/cudnn)
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `CM_INPUT=value`
+ * `--tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value`
+
+
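+ For example (the archive path is hypothetical), pointing the script at a locally downloaded tar file:
+
+ ```bash
+ cmr "get cudnn nvidia" --tar_file=$HOME/Downloads/cudnn-linux-x86_64-8.9.2.tar.xz
+ ```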
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_INPUT: ``
+ * CM_SUDO: `sudo`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get cudnn nvidia " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md
new file mode 100644
index 0000000000..afa8721193
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md
@@ -0,0 +1,119 @@
+# get-tensorrt
+Automatically generated README for this automation recipe: **get-tensorrt**
+
+Category: **[CUDA automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get tensorrt nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,tensorrt,nvidia[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get tensorrt nvidia [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,tensorrt,nvidia',
+ 'out':'con',
+ # ... (other input keys for this script)
+ })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get tensorrt nvidia[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_dev`
+ - ENV variables:
+ - CM_TENSORRT_REQUIRE_DEV: `yes`
+
+
+
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**input:** Full path to the installed TensorRT library (nvinfer)
+ * --**tar_file:** Full path to the TensorRT tar file downloaded from the Nvidia website (https://developer.nvidia.com/tensorrt)
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `CM_INPUT=value`
+ * `--tar_file=value` → `CM_TENSORRT_TAR_FILE_PATH=value`
+
+
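+ For example (the archive path is hypothetical), requesting the dev variation with a local tar file:
+
+ ```bash
+ cmr "get tensorrt nvidia _dev" --tar_file=$HOME/Downloads/TensorRT-8.6.1.tar.gz
+ ```
+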
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get tensorrt nvidia [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md
new file mode 100644
index 0000000000..335dc83a5c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md
@@ -0,0 +1,6 @@
+* [get-cuda](get-cuda/index.md)
+* [get-cuda-devices](get-cuda-devices/index.md)
+* [get-cudnn](get-cudnn/index.md)
+* [get-tensorrt](get-tensorrt/index.md)
+* [install-cuda-package-manager](install-cuda-package-manager/index.md)
+* [install-cuda-prebuilt](install-cuda-prebuilt/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md
new file mode 100644
index 0000000000..84b7b3e48d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md
@@ -0,0 +1,87 @@
+# install-cuda-package-manager
+Automatically generated README for this automation recipe: **install-cuda-package-manager**
+
+Category: **[CUDA automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install package-manager cuda package-manager-cuda install-pm-cuda" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,package-manager,cuda,package-manager-cuda,install-pm-cuda
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install package-manager cuda package-manager-cuda install-pm-cuda "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,package-manager,cuda,package-manager-cuda,install-pm-cuda',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install package-manager cuda package-manager-cuda install-pm-cuda"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/run-ubuntu.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install package-manager cuda package-manager-cuda install-pm-cuda " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md
new file mode 100644
index 0000000000..6748173435
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md
@@ -0,0 +1,138 @@
+# install-cuda-prebuilt
+Automatically generated README for this automation recipe: **install-cuda-prebuilt**
+
+Category: **[CUDA automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**install-driver**"
+
+ Click here to expand this section.
+
+ * `_driver`
+ - ENV variables:
+ - CM_CUDA_INSTALL_DRIVER: `yes`
+ * **`_no-driver`** (default)
+ - ENV variables:
+ - CM_CUDA_INSTALL_DRIVER: `no`
+
+
+
+
+ ##### Default variations
+
+ `_no-driver`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--local_run_file_path=value` → `CUDA_RUN_FILE_LOCAL_PATH=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_SUDO: `sudo`
+
+
+#### Versions
+Default version: `11.8.0`
+
+* `11.7.0`
+* `11.8.0`
+* `12.0.0`
+* `12.1.1`
+* `12.2.0`
+* `12.3.2`
+* `12.4.1`
+
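+For example, to pin one of the versions listed above and also install the driver (a sketch; it assumes the generic CM `--version` flag applies to this script):
+
+```bash
+cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda _driver" --version=12.4.1
+```
+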
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md
new file mode 100644
index 0000000000..0cdd8886a8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md
@@ -0,0 +1,87 @@
+# destroy-terraform
+Automatically generated README for this automation recipe: **destroy-terraform**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "destroy terraform cmd" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=destroy,terraform,cmd
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "destroy terraform cmd "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'destroy,terraform,cmd',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "destroy terraform cmd"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/run.bat)
+___
+#### Script output
+```bash
+cmr "destroy terraform cmd " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md
new file mode 100644
index 0000000000..9e06d804b0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md
@@ -0,0 +1,87 @@
+# get-aws-cli
+Automatically generated README for this automation recipe: **get-aws-cli**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get aws-cli aws cli" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,aws-cli,aws,cli
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get aws-cli aws cli "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,aws-cli,aws,cli',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get aws-cli aws cli"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get aws-cli aws cli " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md
new file mode 100644
index 0000000000..18c91c2641
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md
@@ -0,0 +1,87 @@
+# get-terraform
+Automatically generated README for this automation recipe: **get-terraform**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get terraform get-terraform" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,terraform,get-terraform
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get terraform get-terraform "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,terraform,get-terraform',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get terraform get-terraform"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get terraform get-terraform " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md
new file mode 100644
index 0000000000..84fc1dc1aa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md
@@ -0,0 +1,6 @@
+* [destroy-terraform](destroy-terraform/index.md)
+* [get-aws-cli](get-aws-cli/index.md)
+* [get-terraform](get-terraform/index.md)
+* [install-aws-cli](install-aws-cli/index.md)
+* [install-terraform-from-src](install-terraform-from-src/index.md)
+* [run-terraform](run-terraform/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md
new file mode 100644
index 0000000000..5973d9c9cb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md
@@ -0,0 +1,86 @@
+# install-aws-cli
+Automatically generated README for this automation recipe: **install-aws-cli**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-aws-cli/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install script aws-cli aws cli" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,script,aws-cli,aws,cli
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install script aws-cli aws cli "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,script,aws-cli,aws,cli',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install script aws-cli aws cli"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-aws-cli/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install script aws-cli aws cli " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md
new file mode 100644
index 0000000000..d1cba41e11
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md
@@ -0,0 +1,90 @@
+# install-terraform-from-src
+Automatically generated README for this automation recipe: **install-terraform-from-src**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-terraform-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install terraform from-src" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,terraform,from-src
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install terraform from-src "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,terraform,from-src',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install terraform from-src"
+ ```
+___
+
+#### Versions
+Default version: `main`
+
+* `main`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-terraform-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install terraform from-src " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md
new file mode 100644
index 0000000000..f164a7352b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md
@@ -0,0 +1,388 @@
+# run-terraform
+Automatically generated README for this automation recipe: **run-terraform**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/README-extra.md)
+
+
+---
+
+## Setup for Google Cloud Instances
+```bash
+sudo snap install google-cloud-cli --classic
+gcloud auth application-default login
+```
+
+The two commands above install the google-cloud-cli and authorize the user to access it. Once done, you can create a GCP instance with a CM command like the one below; to destroy an instance, repeat the same command with the `--destroy` option (see the example below).
+
+```bash
+cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit
+```
+Here, `mlperf-inference-tests` is the name of the Google Cloud project created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard).
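+
+For example, destroying the instance created above is the same command with `--destroy` appended (a sketch reusing the project variation from this page):
+
+```bash
+cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit --destroy
+```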
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run terraform" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,terraform[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run terraform [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'run,terraform',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run terraform[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_amazon-linux-2-kernel.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE_OS: `amazon-linux-2-kernel.#`
+ * `_graviton`
+ - ENV variables:
+ - CM_TERRAFORM_AWS_GRAVITON_INSTANCE: `yes`
+ * `_inferentia`
+ - ENV variables:
+ - CM_TERRAFORM_AWS_INFERENTIA_INSTANCE: `yes`
+ * `_rhel.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE_OS: `rhel.#`
+ * `_ubuntu.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE_OS: `ubuntu.#`
+
+
+
+
+ * Group "**aws-instance-image**"
+
+ Click here to expand this section.
+
+ * `_amazon-linux-2-kernel.510,arm64,us-west-2`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ami-0f1a5f5ada0e7da53`
+ * `_aws_instance_image.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `#`
+ * `_aws_instance_image.ami-0735c191cf914754d`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ami-0735c191cf914754d`
+ * `_aws_instance_image.ami-0a0d8589b597d65b3`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ami-0a0d8589b597d65b3`
+ * `_rhel.9,x86,us-west-2`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ami-0dda7e535b65b6469`
+ * `_ubuntu.2204,arm64,us-west-2`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ami-079f51a7bcca65b92`
+ * `_ubuntu.2204,x86,us-west-2`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ami-0735c191cf914754d`
+
+
+
+
+ * Group "**aws-instance-type**"
+
+ Click here to expand this section.
+
+ * `_a1.2xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `a1.2xlarge`
+ * `_a1.metal`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `a1.metal`
+ * `_a1.xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `a1.xlarge`
+ * `_aws_instance_type.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `#`
+ * `_c5.12xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `c5.12xlarge`
+ * `_c5.4xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `c5.4xlarge`
+ * `_c5d.9xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `c5d.9xlarge`
+ * `_g4dn.xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `g4dn.xlarge`
+ * `_inf1.2xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `inf1.2xlarge`
+ * `_inf1.xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `inf1.xlarge`
+ * `_inf2.8xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `inf2.8xlarge`
+ * `_inf2.xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `inf2.xlarge`
+ * `_m7g.2xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `m7g.2xlarge`
+ * `_m7g.xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `m7g.xlarge`
+ * `_t2.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.#`
+ * `_t2.2xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.2xlarge`
+ * `_t2.large`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.large`
+ * `_t2.medium`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.medium`
+ * `_t2.micro`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.micro`
+ * `_t2.nano`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.nano`
+ * `_t2.small`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.small`
+ * `_t2.xlarge`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `t2.xlarge`
+
+
+
+
+ * Group "**cloud-provider**"
+
+ Click here to expand this section.
+
+ * **`_aws`** (default)
+ - ENV variables:
+ - CM_TERRAFORM_CONFIG_DIR_NAME: `aws`
+ * `_gcp`
+ - ENV variables:
+ - CM_TERRAFORM_CONFIG_DIR_NAME: `gcp`
+
+
+
+
+ * Group "**gcp-instance-image**"
+
+ Click here to expand this section.
+
+ * `_debian-cloud/debian-11`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `debian-cloud/debian-11`
+ * `_gcp_instance_image.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `#`
+ * `_ubuntu-2204-jammy-v20230114`
+ - ENV variables:
+ - TF_VAR_INSTANCE_IMAGE: `ubuntu-2204-jammy-v20230114`
+
+
+
+
+ * Group "**gcp-instance-type**"
+
+ Click here to expand this section.
+
+ * `_f1-micro`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `f1-micro`
+ * `_gcp_instance_type.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `#`
+ * `_n1-highmem.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `n1-highmem-#`
+ * `_n1-standard.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_TYPE: `n1-highmem-#`
+
+
+
+
+ * Group "**gcp-project**"
+
+ Click here to expand this section.
+
+ * `_gcp_project.#`
+ - ENV variables:
+ - TF_VAR_GCP_PROJECT: `#`
+
+
+
+
+ * Group "**instance-name**"
+
+ Click here to expand this section.
+
+ * `_instance_name.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_NAME: `#`
+
+
+
+
+ * Group "**platform**"
+
+ Click here to expand this section.
+
+ * `_arm64`
+ - ENV variables:
+ - CM_INSTANCE_PLATFORM: `arm64`
+ * **`_x86`** (default)
+ - ENV variables:
+ - CM_INSTANCE_PLATFORM: `x86`
+
+
+
+
+ * Group "**region**"
+
+ Click here to expand this section.
+
+ * `_region.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_REGION: `#`
+ * `_us-west-2`
+ - ENV variables:
+ - TF_VAR_INSTANCE_REGION: `us-west-2`
+
+
+
+
+ * Group "**storage-size**"
+
+ Click here to expand this section.
+
+ * `_storage_size.#`
+ - ENV variables:
+ - TF_VAR_DISK_GBS: `#`
+ * `_storage_size.8`
+ - ENV variables:
+ - TF_VAR_DISK_GBS: `8`
+
+
+
+
+ * Group "**zone**"
+
+ Click here to expand this section.
+
+ * `_zone.#`
+ - ENV variables:
+ - TF_VAR_INSTANCE_ZONE: `#`
+
+
+
+
+ ##### Default variations
+
+ `_aws,_x86`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--cminit=value` → `CM_TERRAFORM_CM_INIT=value`
+ * `--destroy=value` → `CM_DESTROY_TERRAFORM=value`
+ * `--gcp_credentials_json_file=value` → `CM_GCP_CREDENTIALS_JSON_PATH=value`
+ * `--key_file=value` → `CM_SSH_KEY_FILE=value`
+ * `--run_cmds=value` → `CM_TERRAFORM_RUN_COMMANDS=value`
+ * `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * TF_VAR_SECURITY_GROUP_ID: `sg-0783752c97d2e011d`
+ * TF_VAR_CPU_COUNT: `1`
+
+
+
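+For example, a sketch that combines the variations and flags above to launch a small AWS instance (the SSH key path is hypothetical):
+
+```bash
+cm run script --tags=run,terraform,_aws,_t2.medium,_us-west-2,_storage_size.8 --cminit --key_file=$HOME/.ssh/id_rsa
+```
+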
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run terraform [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md
new file mode 100644
index 0000000000..71dc75a6bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md
@@ -0,0 +1 @@
+* [launch-benchmark](launch-benchmark/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md
new file mode 100644
index 0000000000..4ad86bd2a4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md
@@ -0,0 +1,81 @@
+# launch-benchmark
+Automatically generated README for this automation recipe: **launch-benchmark**
+
+Category: **[Collective benchmarking](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/launch-benchmark/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/launch-benchmark/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "launch benchmark" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=launch,benchmark
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "launch benchmark "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'launch,benchmark',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "launch benchmark"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "launch benchmark " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md
new file mode 100644
index 0000000000..7ff7292fbb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md
@@ -0,0 +1,92 @@
+# get-aocl
+Automatically generated README for this automation recipe: **get-aocl**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get lib aocl amd-optimized amd" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,lib,aocl,amd-optimized,amd
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get lib aocl amd-optimized amd "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,lib,aocl,amd-optimized,amd',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get lib aocl amd-optimized amd"
+ ```
+___
+
+#### Versions
+Default version: `4.0`
+
+* `4.0`
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get lib aocl amd-optimized amd " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md
new file mode 100644
index 0000000000..fd2dc6ceff
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md
@@ -0,0 +1,87 @@
+# Detect or install Microsoft C compiler
+Automatically generated README for this automation recipe: **get-cl**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cl compiler c-compiler cpp-compiler get-cl" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cl,compiler,c-compiler,cpp-compiler,get-cl
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cl compiler c-compiler cpp-compiler get-cl "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,cl,compiler,c-compiler,cpp-compiler,get-cl',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cl compiler c-compiler cpp-compiler get-cl"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ No run file exists for Linux/macOS
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/run.bat)
+___
+#### Script output
+```bash
+cmr "get cl compiler c-compiler cpp-compiler get-cl " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md
new file mode 100644
index 0000000000..b1b46b23ec
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md
@@ -0,0 +1,80 @@
+# get-compiler-flags
+Automatically generated README for this automation recipe: **get-compiler-flags**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-flags/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get compiler-flags" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,compiler-flags
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get compiler-flags "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,compiler-flags',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get compiler-flags"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get compiler-flags " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md
new file mode 100644
index 0000000000..90844ce509
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md
@@ -0,0 +1,86 @@
+# get-compiler-rust
+Automatically generated README for this automation recipe: **get-compiler-rust**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-rust/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get rust-compiler" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,rust-compiler
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get rust-compiler "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,rust-compiler',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get rust-compiler"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-rust/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get rust-compiler " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md
new file mode 100644
index 0000000000..06913a2fc5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md
@@ -0,0 +1,87 @@
+# Detect or install GCC compiler
+Automatically generated README for this automation recipe: **get-gcc**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get gcc compiler c-compiler cpp-compiler get-gcc" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,gcc,compiler,c-compiler,cpp-compiler,get-gcc
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get gcc compiler c-compiler cpp-compiler get-gcc "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,gcc,compiler,c-compiler,cpp-compiler,get-gcc',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get gcc compiler c-compiler cpp-compiler get-gcc"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/run.bat)
+___
+#### Script output
+```bash
+cmr "get gcc compiler c-compiler cpp-compiler get-gcc " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md
new file mode 100644
index 0000000000..7d691b01a8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md
@@ -0,0 +1,87 @@
+# get-go
+Automatically generated README for this automation recipe: **get-go**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get tool go get-go" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,tool,go,get-go
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get tool go get-go "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,tool,go,get-go',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get tool go get-go"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get tool go get-go " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md
new file mode 100644
index 0000000000..8c5855c88a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md
@@ -0,0 +1,101 @@
+# Detect or install LLVM compiler
+Automatically generated README for this automation recipe: **get-llvm**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get llvm compiler c-compiler cpp-compiler get-llvm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'get,llvm,compiler,c-compiler,cpp-compiler,get-llvm',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get llvm compiler c-compiler cpp-compiler get-llvm[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_from-prebuilt`
+ * `_from-src`
+
+
+
+
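+For example, to request a prebuilt LLVM rather than detecting an installed one (a sketch using the `_from-prebuilt` variation listed above):
+
+```bash
+cmr "get llvm compiler c-compiler cpp-compiler get-llvm _from-prebuilt"
+```
+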
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/run.bat)
+___
+#### Script output
+```bash
+cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md
new file mode 100644
index 0000000000..d24e5e7035
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md
@@ -0,0 +1,18 @@
+* [get-aocl](get-aocl/index.md)
+* [get-cl](get-cl/index.md)
+* [get-compiler-flags](get-compiler-flags/index.md)
+* [get-compiler-rust](get-compiler-rust/index.md)
+* [get-gcc](get-gcc/index.md)
+* [get-go](get-go/index.md)
+* [get-llvm](get-llvm/index.md)
+* [install-gcc-src](install-gcc-src/index.md)
+* [install-ipex-from-src](install-ipex-from-src/index.md)
+* [install-llvm-prebuilt](install-llvm-prebuilt/index.md)
+* [install-llvm-src](install-llvm-src/index.md)
+* [install-onednn-from-src](install-onednn-from-src/index.md)
+* [install-onnxruntime-from-src](install-onnxruntime-from-src/index.md)
+* [install-pytorch-from-src](install-pytorch-from-src/index.md)
+* [install-pytorch-kineto-from-src](install-pytorch-kineto-from-src/index.md)
+* [install-torchvision-from-src](install-torchvision-from-src/index.md)
+* [install-tpp-pytorch-extension](install-tpp-pytorch-extension/index.md)
+* [install-transformers-from-src](install-transformers-from-src/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md
new file mode 100644
index 0000000000..54724e8a79
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md
@@ -0,0 +1,90 @@
+# install-gcc-src
+Automatically generated README for this automation recipe: **install-gcc-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gcc-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src gcc src-gcc" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,gcc,src-gcc
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src gcc src-gcc "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,src,gcc,src-gcc',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src gcc src-gcc"
+ ```
+___
+
+#### Versions
+Default version: `12`
+
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gcc-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src gcc src-gcc " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md
new file mode 100644
index 0000000000..673ca83766
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md
@@ -0,0 +1,128 @@
+# Build IPEX from sources
+Automatically generated README for this automation recipe: **install-ipex-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-ipex-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src ipex src-ipex" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,ipex,src-ipex[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src ipex src-ipex [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,get,src,from.src,ipex,src-ipex',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src ipex src-ipex[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_for-intel-mlperf-inference-v3.1-gptj`
+ - ENV variables:
+ - CM_CONDA_ENV: `yes`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/intel/intel-extension-for-pytorch`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/intel/intel-extension-for-pytorch`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/intel/intel-extension-for-pytorch`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-ipex-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src ipex src-ipex [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md
new file mode 100644
index 0000000000..96038406ec
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md
@@ -0,0 +1,90 @@
+# Install prebuilt LLVM compiler
+Automatically generated README for this automation recipe: **install-llvm-prebuilt**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm"
+ ```
+___
+
+#### Versions
+Default version: `15.0.6`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/run.bat)
+___
+#### Script output
+```bash
+cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md
new file mode 100644
index 0000000000..655046f73b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md
@@ -0,0 +1,160 @@
+# Build LLVM compiler from sources (can take >30 min)
+Automatically generated README for this automation recipe: **install-llvm-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src llvm from.src src-llvm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,llvm,from.src,src-llvm[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src llvm from.src src-llvm [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action': 'run',
+                   'automation': 'script',
+                   'tags': 'install,src,llvm,from.src,src-llvm',
+                   'out': 'con',
+                   # ...
+                   # (other input keys for this script)
+                   # ...
+                  })
+
+ if r['return'] > 0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src llvm from.src src-llvm[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_for-intel-mlperf-inference-v3.1-bert`
+ - ENV variables:
+ - CM_LLVM_CONDA_ENV: `yes`
+ * `_for-intel-mlperf-inference-v3.1-gptj`
+ - ENV variables:
+ - CM_LLVM_CONDA_ENV: `yes`
+ - CM_LLVM_16_INTEL_MLPERF_INFERENCE: `yes`
+ - USE_CUDA: `0`
+ - CUDA_VISIBLE_DEVICES: ``
+ * `_full-history`
+ * `_runtimes.#`
+ - ENV variables:
+ - CM_LLVM_ENABLE_RUNTIMES: `#`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**build-type**"
+
+ Click here to expand this section.
+
+ * `_debug`
+ - ENV variables:
+ - CM_LLVM_BUILD_TYPE: `debug`
+ * **`_release`** (default)
+ - ENV variables:
+ - CM_LLVM_BUILD_TYPE: `release`
+
+
+
+
+ * Group "**clang**"
+
+ Click here to expand this section.
+
+ * **`_clang`** (default)
+ - ENV variables:
+ - CM_LLVM_ENABLE_PROJECTS: `clang`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+
+
+
+
+ ##### Default variations
+
+ `_clang,_release`
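+
+    For example, the `#` in wildcard variations such as `_tag.#` or `_runtimes.#` is replaced by a user-supplied value; a hypothetical invocation pinning a release tag and enabling the `libcxx` runtime might look like:
+
+    ```bash
+    cm run script --tags=install,src,llvm,from.src,src-llvm,_release,_tag.llvmorg-15.0.6,_runtimes.libcxx
+    ```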
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src llvm from.src src-llvm [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md
new file mode 100644
index 0000000000..49bb4844bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md
@@ -0,0 +1,129 @@
+# Build oneDNN from sources
+Automatically generated README for this automation recipe: **install-onednn-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onednn-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src onednn src-onednn" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,onednn,src-onednn[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src onednn src-onednn [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,get,src,from.src,onednn,src-onednn',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src onednn src-onednn[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_for-intel-mlperf-inference-v3.1-bert`
+ - ENV variables:
+ - CM_CONDA_ENV: `yes`
+ - CM_FOR_INTEL_MLPERF_INFERENCE: `yes`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/oneapi-src/oneDNN`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/oneapi-src/oneDNN`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/oneapi-src/oneDNN`
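+
+    For example, a sketch pinning the build to a specific commit via the `_sha.#` wildcard (the SHA below is a hypothetical placeholder):
+
+    ```bash
+    cm run script --tags=install,get,src,from.src,onednn,src-onednn,_sha.0123456789abcdef
+    ```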
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-intel-mlperf-inference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onednn-from-src/run-intel-mlperf-inference.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src onednn src-onednn [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md
new file mode 100644
index 0000000000..011956f1dc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md
@@ -0,0 +1,125 @@
+# Build ONNX Runtime from sources
+Automatically generated README for this automation recipe: **install-onnxruntime-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onnxruntime-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src onnxruntime src-onnxruntime" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src onnxruntime src-onnxruntime [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,get,src,from.src,onnxruntime,src-onnxruntime',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src onnxruntime src-onnxruntime[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_cuda`
+ - ENV variables:
+ - CM_ONNXRUNTIME_GPU: `yes`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * **`_repo.https://github.com/Microsoft/onnxruntime`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/Microsoft/onnxruntime`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/Microsoft/onnxruntime`
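+
+    For example, a CUDA-enabled build from the default repository can be requested as follows (a sketch, assuming a CUDA toolchain is detectable on the host):
+
+    ```bash
+    cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime,_cuda
+    ```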
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onnxruntime-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src onnxruntime src-onnxruntime [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md
new file mode 100644
index 0000000000..4c7c185125
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md
@@ -0,0 +1,143 @@
+# Build PyTorch from sources
+Automatically generated README for this automation recipe: **install-pytorch-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src pytorch src-pytorch" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,pytorch,src-pytorch[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src pytorch src-pytorch [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,get,src,from.src,pytorch,src-pytorch',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src pytorch src-pytorch[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_cuda`
+ - ENV variables:
+ - CUDA_HOME: `<<>>`
+ - CUDNN_LIBRARY_PATH: `<<>>`
+ - CUDNN_INCLUDE_PATH: `<<>>`
+ - CUDA_NVCC_EXECUTABLE: `<<>>`
+ - USE_CUDA: `1`
+ - USE_CUDNN: `1`
+ - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper`
+ - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1`
+ * `_for-intel-mlperf-inference-v3.1-bert`
+ - ENV variables:
+ - CM_CONDA_ENV: `yes`
+ - CM_MLPERF_INFERENCE_INTEL: `yes`
+ - USE_CUDA: `0`
+ * `_for-nvidia-mlperf-inference-v3.1`
+ * `_for-nvidia-mlperf-inference-v4.0`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/pytorch/pytorch`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/pytorch/pytorch`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/pytorch/pytorch`
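+
+    For example, a sketch of a CUDA build checked out at an upstream release tag via the `_tag.#` wildcard (`v2.2.0` is just an illustrative value):
+
+    ```bash
+    cm run script --tags=install,get,src,from.src,pytorch,src-pytorch,_cuda,_tag.v2.2.0
+    ```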
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-intel-mlperf-inference-v3_1.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src pytorch src-pytorch [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md
new file mode 100644
index 0000000000..99cb8893fa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md
@@ -0,0 +1,135 @@
+# Build PyTorch Kineto from sources
+Automatically generated README for this automation recipe: **install-pytorch-kineto-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-kineto-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src pytorch-kineto kineto src-pytorch-kineto[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_cuda`
+ - ENV variables:
+ - CUDA_HOME: `<<>>`
+ - CUDA_NVCC_EXECUTABLE: `<<>>`
+ - CUDNN_INCLUDE_PATH: `<<>>`
+ - CUDNN_LIBRARY_PATH: `<<>>`
+ - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper`
+ - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1`
+ - USE_CUDA: `1`
+ - USE_CUDNN: `1`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/pytorch/kineto`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/pytorch/kineto`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/pytorch/kineto`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-kineto-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md
new file mode 100644
index 0000000000..296969afbb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md
@@ -0,0 +1,137 @@
+# Build torchvision from sources
+Automatically generated README for this automation recipe: **install-torchvision-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-torchvision-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src pytorchvision torchvision src-pytorchvision" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src pytorchvision torchvision src-pytorchvision[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_cuda`
+ - ENV variables:
+ - CUDA_HOME: `<<>>`
+ - CUDA_NVCC_EXECUTABLE: `<<>>`
+ - CUDNN_INCLUDE_PATH: `<<>>`
+ - CUDNN_LIBRARY_PATH: `<<>>`
+ - USE_CUDA: `1`
+ - USE_CUDNN: `1`
+ - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper`
+ - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1`
+ * `_for-nvidia-mlperf-inference-v3.1`
+ * `_for-nvidia-mlperf-inference-v4.0`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/pytorch/vision`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/pytorch/vision`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/pytorch/vision`
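+
+    For example, a sketch building from a custom fork via the `_repo.#` wildcard (the URL below is a hypothetical placeholder):
+
+    ```bash
+    cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision,_repo.https://github.com/myfork/vision
+    ```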
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-torchvision-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md
new file mode 100644
index 0000000000..2b681138d3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md
@@ -0,0 +1,128 @@
+# Build TPP-PEX from sources
+Automatically generated README for this automation recipe: **install-tpp-pytorch-extension**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tpp-pytorch-extension/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src tpp-pex src-tpp-pex" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install get src from.src tpp-pex src-tpp-pex [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,get,src,from.src,tpp-pex,src-tpp-pex',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install get src from.src tpp-pex src-tpp-pex[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_for-intel-mlperf-inference-v3.1-gptj`
+ - ENV variables:
+ - CM_CONDA_ENV: `yes`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/libxsmm/tpp-pytorch-extension`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/libxsmm/tpp-pytorch-extension`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/libxsmm/tpp-pytorch-extension`
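+
+    For example, a sketch selecting the Intel MLPerf inference GPT-J flavour together with a branch checkout via `_branch.#` (the branch name is a hypothetical placeholder):
+
+    ```bash
+    cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex,_for-intel-mlperf-inference-v3.1-gptj,_branch.main
+    ```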
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tpp-pytorch-extension/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src tpp-pex src-tpp-pex [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md
new file mode 100644
index 0000000000..78b59b7318
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md
@@ -0,0 +1,128 @@
+# Build transformers from sources
+Automatically generated README for this automation recipe: **install-transformers-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-transformers-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src from.src transformers src-transformers" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,from.src,transformers,src-transformers[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src from.src transformers src-transformers [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'install,src,from.src,transformers,src-transformers',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src from.src transformers src-transformers[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_for-intel-mlperf-inference-v3.1-bert`
+ - ENV variables:
+ - CM_CONDA_ENV: `yes`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+ Click here to expand this section.
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+    * **`_repo.https://github.com/huggingface/transformers`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/huggingface/transformers`
+
+
+
+
+ ##### Default variations
+
+    `_repo.https://github.com/huggingface/transformers`
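+
+    For example, the Intel MLPerf inference BERT flavour (which builds inside a conda environment, per `CM_CONDA_ENV=yes` above) can be selected as:
+
+    ```bash
+    cm run script --tags=install,src,from.src,transformers,src-transformers,_for-intel-mlperf-inference-v3.1-bert
+    ```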
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-transformers-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src from.src transformers src-transformers [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md
new file mode 100644
index 0000000000..855c981b1c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md
@@ -0,0 +1 @@
+* [publish-results-to-dashboard](publish-results-to-dashboard/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md
new file mode 100644
index 0000000000..e496c921b7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md
@@ -0,0 +1,86 @@
+# publish-results-to-dashboard
+Automatically generated README for this automation recipe: **publish-results-to-dashboard**
+
+Category: **[Dashboard automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "publish-results dashboard" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=publish-results,dashboard
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "publish-results dashboard "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'publish-results,dashboard',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "publish-results dashboard"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/run.bat)
+___
+#### Script output
+```bash
+cmr "publish-results dashboard " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md
new file mode 100644
index 0000000000..916d642951
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md
@@ -0,0 +1,109 @@
+# get-android-sdk
+Automatically generated README for this automation recipe: **get-android-sdk**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-android-sdk/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-android-sdk/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get android sdk android-sdk" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,android,sdk,android-sdk [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get android sdk android-sdk " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,android,sdk,android-sdk',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get android sdk android-sdk" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--android_cmake_version=value` → `CM_ANDROID_CMAKE_VERSION=value`
+ * `--android_ndk_version=value` → `CM_ANDROID_NDK_VERSION=value`
+ * `--android_version=value` → `CM_ANDROID_VERSION=value`
+ * `--build_tools_version=value` → `CM_ANDROID_BUILD_TOOLS_VERSION=value`
+ * `--cmdline_tools_version=value` → `CM_ANDROID_CMDLINE_TOOLS_VERSION=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_ANDROID_BUILD_TOOLS_VERSION: `29.0.3`
+ * CM_ANDROID_CMAKE_VERSION: `3.6.4111459`
+ * CM_ANDROID_CMDLINE_TOOLS_URL: `https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip`
+ * CM_ANDROID_CMDLINE_TOOLS_VERSION: `9123335`
+ * CM_ANDROID_NDK_VERSION: `21.3.6528147`
+ * CM_ANDROID_VERSION: `30`
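+
+    For example, any of these defaults can be overridden at run time; a sketch bumping the Android API level via the mapped `--android_version` input flag:
+
+    ```bash
+    cm run script --tags=get,android,sdk,android-sdk --android_version=31
+    ```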
+
+
+
+___
+#### Script output
+```bash
+cmr "get android sdk android-sdk " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md
new file mode 100644
index 0000000000..020185dd73
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md
@@ -0,0 +1,97 @@
+# get-aria2
+Automatically generated README for this automation recipe: **get-aria2**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get aria2 get-aria2" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,aria2,get-aria2 [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get aria2 get-aria2 " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,aria2,get-aria2',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get aria2 get-aria2" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--install=value` → `CM_FORCE_INSTALL=value`
+ * `--src=value` → `CM_ARIA2_BUILD_FROM_SRC=value`
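+
+    For example, a sketch forcing a (re)install and a build from sources via these flags (assuming `yes` is accepted as a truthy value):
+
+    ```bash
+    cm run script --tags=get,aria2,get-aria2 --install=yes --src=yes
+    ```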
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/run.bat)
+___
+#### Script output
+```bash
+cmr "get aria2 get-aria2 " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md
new file mode 100644
index 0000000000..b891263a82
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md
@@ -0,0 +1,87 @@
+# get-bazel
+Automatically generated README for this automation recipe: **get-bazel**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get bazel get-bazel" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,bazel,get-bazel
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get bazel get-bazel "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,bazel,get-bazel',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get bazel get-bazel"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/run.bat)
+___
+#### Script output
+```bash
+cmr "get bazel get-bazel " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md
new file mode 100644
index 0000000000..565ded732d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md
@@ -0,0 +1,110 @@
+# get-blis
+Automatically generated README for this automation recipe: **get-blis**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get lib blis" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,lib,blis[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get lib blis [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,lib,blis',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get lib blis[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**source**"
+
+ Click here to expand this section.
+
+ * `_amd`
+ * **`_flame`** (default)
+
+
+
+
+ ##### Default variations
+
+ `_flame`
+#### Versions
+Default version: `master`
+
+* `0.9.0`
+* `master`
+
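+For example, a source fork and a version can be combined; a sketch selecting the AMD fork at version 0.9.0 via the generic `--version` flag:
+
+```bash
+cm run script --tags=get,lib,blis,_amd --version=0.9.0
+```
+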
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/run.bat)
+___
+#### Script output
+```bash
+cmr "get lib blis [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md
new file mode 100644
index 0000000000..4e31f81c9b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md
@@ -0,0 +1,86 @@
+# get-brew
+Automatically generated README for this automation recipe: **get-brew**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-brew/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get brew" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,brew
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get brew "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,brew',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get brew"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-brew/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get brew " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md
new file mode 100644
index 0000000000..1b1f97f9c2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md
@@ -0,0 +1,86 @@
+# get-cmake
+Automatically generated README for this automation recipe: **get-cmake**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cmake get-cmake" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cmake,get-cmake
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cmake get-cmake "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,cmake,get-cmake',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cmake get-cmake"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/run.bat)
+___
+#### Script output
+```bash
+cmr "get cmake get-cmake " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md
new file mode 100644
index 0000000000..21ab4045c1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md
@@ -0,0 +1,123 @@
+# get-cmsis_5
+Automatically generated README for this automation recipe: **get-cmsis_5**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get cmsis cmsis_5 arm-software" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,cmsis,cmsis_5,arm-software[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get cmsis cmsis_5 arm-software [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,cmsis,cmsis_5,arm-software',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get cmsis cmsis_5 arm-software[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_recurse-submodules`
+ - ENV variables:
+ - CM_GIT_RECURSE_SUBMODULES: `--recurse-submodules`
+ * `_short-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 10`
+
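+    For example, a sketch combining both variations for a shallow clone with submodules:
+
+    ```bash
+    cm run script --tags=get,cmsis,cmsis_5,arm-software,_recurse-submodules,_short-history
+    ```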
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_DEPTH: ``
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_URL: `https://github.com/ARM-software/CMSIS_5.git`
+
+
+#### Versions
+Default version: `custom`
+
+* `custom`
+* `develop`
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get cmsis cmsis_5 arm-software [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md
new file mode 100644
index 0000000000..66e6de1a96
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md
@@ -0,0 +1,86 @@
+# get-docker
+Automatically generated README for this automation recipe: **get-docker**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-docker/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get install docker engine" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,install,docker,engine
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get install docker engine "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,install,docker,engine',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get install docker engine"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-docker/run-ubuntu.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get install docker engine " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md
new file mode 100644
index 0000000000..72bcf7044c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md
@@ -0,0 +1,214 @@
+# get-generic-sys-util
+Automatically generated README for this automation recipe: **get-generic-sys-util**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-sys-util/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get sys-util generic generic-sys-util" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,sys-util,generic,generic-sys-util[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get sys-util generic generic-sys-util [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,sys-util,generic,generic-sys-util',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get sys-util generic generic-sys-util[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_g++-12`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `g++12`
+ * `_gflags-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `gflags-dev`
+ * `_git-lfs`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `git-lfs`
+ * `_glog-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `glog-dev`
+ * `_libboost-all-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libboost-all-dev`
+ * `_libbz2-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libbz2_dev`
+ * `_libev-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libev_dev`
+ * `_libffi-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libffi_dev`
+ * `_libffi7`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libffi7`
+ * `_libgdbm-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libgdbm_dev`
+ * `_libgmock-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libgmock-dev`
+ * `_liblzma-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `liblzma_dev`
+ * `_libmpfr-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libmpfr-dev`
+ * `_libncurses-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libncurses_dev`
+ * `_libnuma-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libnuma-dev`
+ * `_libpci-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libpci-dev`
+ * `_libre2-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libre2-dev`
+ * `_libreadline-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libreadline_dev`
+ * `_libsqlite3-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libsqlite3_dev`
+ * `_libssl-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libssl_dev`
+ * `_libudev-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `libudev-dev`
+ * `_ninja-build`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `ninja-build`
+ * `_nlohmann-json3-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `nlohmann_json3_dev`
+ * `_ntpdate`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `ntpdate`
+ * `_numactl`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `numactl`
+ * `_nvidia-cuda-toolkit`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `nvidia-cuda-toolkit`
+ * `_rapidjson-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `rapidjson-dev`
+ * `_rsync`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `rsync`
+ * `_screen`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `screen`
+ * `_sox`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `sox`
+ * `_tk-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `tk_dev`
+ * `_transmission`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `transmission`
+ * `_wget`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `wget`
+ * `_zlib`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `zlib`
+ * `_zlib1g-dev`
+ - ENV variables:
+ - CM_SYS_UTIL_NAME: `zlib1g_dev`
+
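+    For example, each variation simply selects which package to detect or install; a sketch installing `rsync`:
+
+    ```bash
+    cm run script --tags=get,sys-util,generic,generic-sys-util,_rsync
+    ```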
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_CLEAN_DIRS: `bin`
+ * CM_SUDO: `sudo`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-sys-util/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get sys-util generic generic-sys-util [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md
new file mode 100644
index 0000000000..92b5250f1a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md
@@ -0,0 +1,89 @@
+# get-google-test
+Automatically generated README for this automation recipe: **get-google-test**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-test/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get google-test googletest gtest test google" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,google-test,googletest,gtest,test,google
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get google-test googletest gtest test google "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,google-test,googletest,gtest,test,google',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get google-test googletest gtest test google"
+ ```
+___
+
+#### Versions
+Default version: `1.14.0`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-test/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get google-test googletest gtest test google " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md
new file mode 100644
index 0000000000..f0c5f5ac62
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md
@@ -0,0 +1,124 @@
+# get-java
+Automatically generated README for this automation recipe: **get-java**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get java" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,java[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get java [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,java',
+                      'out':'con',
+                      # ... (other input keys for this script) ...
+                     })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get java[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ Click here to expand this section.
+
+ * `_install`
+ - ENV variables:
+ - CM_JAVA_PREBUILT_INSTALL: `on`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--install=value` → `CM_JAVA_PREBUILT_INSTALL=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_JAVA_PREBUILT_VERSION: `19`
+ * CM_JAVA_PREBUILT_BUILD: `36`
+ * CM_JAVA_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/`
+ * CM_JAVA_PREBUILT_FILENAME: `openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin`
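+
+    For example, a hypothetical run that overrides these defaults to fetch a different prebuilt JDK (the chosen version and build must exist at the prebuilt URL):
+
+    ```bash
+    cm run script --tags=get,java,_install --env.CM_JAVA_PREBUILT_VERSION=17 --env.CM_JAVA_PREBUILT_BUILD=35
+    ```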
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/run.bat)
+___
+#### Script output
+```bash
+cmr "get java [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md
new file mode 100644
index 0000000000..fae70fe46f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md
@@ -0,0 +1,124 @@
+# get-javac
+Automatically generated README for this automation recipe: **get-javac**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get javac" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,javac[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get javac [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,javac',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get javac[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_install`
+ - ENV variables:
+ - CM_JAVAC_PREBUILT_INSTALL: `on`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--install=value` → `CM_JAVAC_PREBUILT_INSTALL=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_JAVAC_PREBUILT_VERSION: `19`
+ * CM_JAVAC_PREBUILT_BUILD: `36`
+ * CM_JAVAC_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/`
+ * CM_JAVAC_PREBUILT_FILENAME: `openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/run.bat)
+___
+#### Script output
+```bash
+cmr "get javac [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md
new file mode 100644
index 0000000000..99b740e1f7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md
@@ -0,0 +1,92 @@
+# get-lib-armnn
+Automatically generated README for this automation recipe: **get-lib-armnn**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-armnn/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get lib-armnn lib armnn" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,lib-armnn,lib,armnn
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get lib-armnn lib armnn "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,lib-armnn,lib,armnn',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get lib-armnn lib armnn"
+ ```
+___
+
+#### Versions
+Default version: `23.11`
+
+* `22.11`
+* `23.05`
+* `23.11`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-armnn/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get lib-armnn lib armnn " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md
new file mode 100644
index 0000000000..b0860ce568
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md
@@ -0,0 +1,91 @@
+# get-lib-dnnl
+Automatically generated README for this automation recipe: **get-lib-dnnl**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-dnnl/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get lib-dnnl lib dnnl" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,lib-dnnl,lib,dnnl
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get lib-dnnl lib dnnl "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,lib-dnnl,lib,dnnl',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get lib-dnnl lib dnnl"
+ ```
+___
+
+#### Versions
+Default version: `dev`
+
+* `2.2.4`
+* `dev`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-dnnl/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get lib-dnnl lib dnnl " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md
new file mode 100644
index 0000000000..5786390ad8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md
@@ -0,0 +1,107 @@
+# get-lib-protobuf
+Automatically generated README for this automation recipe: **get-lib-protobuf**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-protobuf/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get google-protobuf protobuf lib lib-protobuf google" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,google-protobuf,protobuf,lib,lib-protobuf,google[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get google-protobuf protobuf lib lib-protobuf google [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,google-protobuf,protobuf,lib,lib-protobuf,google',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get google-protobuf protobuf lib lib-protobuf google[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_TMP_GIT_CHECKOUT: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
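+
+    The `#` in these variation names is a placeholder for a user-supplied value; for example, a hypothetical checkout of a specific tag:
+
+    ```bash
+    cm run script --tags=get,google-protobuf,protobuf,lib,lib-protobuf,google,_tag.v1.13.0
+    ```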
+
+
+
+#### Versions
+Default version: `1.13.0`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-protobuf/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get google-protobuf protobuf lib lib-protobuf google [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md
new file mode 100644
index 0000000000..012b061ee0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md
@@ -0,0 +1,90 @@
+# get-lib-qaic-api
+Automatically generated README for this automation recipe: **get-lib-qaic-api**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-qaic-api/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get api lib-qaic-api lib qaic" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,api,lib-qaic-api,lib,qaic
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get api lib-qaic-api lib qaic "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,api,lib-qaic-api,lib,qaic',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get api lib-qaic-api lib qaic"
+ ```
+___
+
+#### Versions
+Default version: `master`
+
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-qaic-api/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get api lib-qaic-api lib qaic " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md
new file mode 100644
index 0000000000..11bd4a211f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md
@@ -0,0 +1,86 @@
+# get-nvidia-docker
+Automatically generated README for this automation recipe: **get-nvidia-docker**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-docker/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get install nvidia nvidia-container-toolkit nvidia-docker engine"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-docker/run-ubuntu.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md
new file mode 100644
index 0000000000..9eda1419e6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md
@@ -0,0 +1,87 @@
+# get-openssl
+Automatically generated README for this automation recipe: **get-openssl**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get openssl lib lib-openssl" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,openssl,lib,lib-openssl
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get openssl lib lib-openssl "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,openssl,lib,lib-openssl',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get openssl lib lib-openssl"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get openssl lib lib-openssl " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md
new file mode 100644
index 0000000000..bf494897cf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md
@@ -0,0 +1,107 @@
+# get-rclone
+Automatically generated README for this automation recipe: **get-rclone**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get rclone" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,rclone[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get rclone [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,rclone',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get rclone[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_gdrive`
+ - ENV variables:
+ - CM_RCLONE_GDRIVE: `yes`
+ * `_system`
+ - ENV variables:
+ - CM_RCLONE_SYSTEM: `yes`
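+
+    Since any combination of these variations can be selected, a sketch that enables both system-wide installation and Google Drive support:
+
+    ```bash
+    cm run script --tags=get,rclone,_gdrive,_system
+    ```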
+
+
+
+#### Versions
+Default version: `1.65.2`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/run.bat)
+___
+#### Script output
+```bash
+cmr "get rclone [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md
new file mode 100644
index 0000000000..558aa7601b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md
@@ -0,0 +1,115 @@
+# get-sys-utils-cm
+Automatically generated README for this automation recipe: **get-sys-utils-cm**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get sys-utils-cm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,sys-utils-cm[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get sys-utils-cm [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,sys-utils-cm',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get sys-utils-cm[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_user`
+ - ENV variables:
+ - CM_PYTHON_PIP_USER: `--user`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--skip=value` → `CM_SKIP_SYS_UTILS=value`
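+
+    For example, a hypothetical invocation that sets `CM_SKIP_SYS_UTILS` to skip the package installation step:
+
+    ```bash
+    cmr "get sys-utils-cm" --skip=yes
+    ```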
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-arch.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-arch.sh)
+ * [run-debian.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-debian.sh)
+ * [run-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-macos.sh)
+ * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-rhel.sh)
+ * [run-sles.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-sles.sh)
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-ubuntu.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get sys-utils-cm [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md
new file mode 100644
index 0000000000..7166bf7a26
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md
@@ -0,0 +1,80 @@
+# get-sys-utils-min
+Automatically generated README for this automation recipe: **get-sys-utils-min**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get sys-utils-min" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,sys-utils-min
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get sys-utils-min "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,sys-utils-min',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get sys-utils-min"
+ ```
+___
+#### Script output
+```bash
+cmr "get sys-utils-min " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md
new file mode 100644
index 0000000000..147e888153
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md
@@ -0,0 +1,99 @@
+# get-xilinx-sdk
+Automatically generated README for this automation recipe: **get-xilinx-sdk**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-xilinx-sdk/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get xilinx sdk" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,xilinx,sdk [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get xilinx sdk " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,xilinx,sdk',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get xilinx sdk" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `CM_XILINX_SDK_FILE_PATH=value`
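+
+    For example, pointing the script at an already-downloaded SDK archive (the path below is hypothetical):
+
+    ```bash
+    cm run script --tags=get,xilinx,sdk --input=$HOME/Downloads/Xilinx_SDK_2019.1.tar.gz
+    ```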
+
+
+
+#### Versions
+Default version: `2019.1`
+
+* `2019.1`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-xilinx-sdk/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get xilinx sdk " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md
new file mode 100644
index 0000000000..58d04192eb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md
@@ -0,0 +1,86 @@
+# get-zendnn
+Automatically generated README for this automation recipe: **get-zendnn**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get zendnn amd from.src" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,zendnn,amd,from.src
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get zendnn amd from.src "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,zendnn,amd,from.src',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get zendnn amd from.src"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/run.bat)
+___
+#### Script output
+```bash
+cmr "get zendnn amd from.src " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md
new file mode 100644
index 0000000000..2f5ec2c54f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md
@@ -0,0 +1,30 @@
+* [get-android-sdk](get-android-sdk/index.md)
+* [get-aria2](get-aria2/index.md)
+* [get-bazel](get-bazel/index.md)
+* [get-blis](get-blis/index.md)
+* [get-brew](get-brew/index.md)
+* [get-cmake](get-cmake/index.md)
+* [get-cmsis_5](get-cmsis_5/index.md)
+* [get-docker](get-docker/index.md)
+* [get-generic-sys-util](get-generic-sys-util/index.md)
+* [get-google-test](get-google-test/index.md)
+* [get-java](get-java/index.md)
+* [get-javac](get-javac/index.md)
+* [get-lib-armnn](get-lib-armnn/index.md)
+* [get-lib-dnnl](get-lib-dnnl/index.md)
+* [get-lib-protobuf](get-lib-protobuf/index.md)
+* [get-lib-qaic-api](get-lib-qaic-api/index.md)
+* [get-nvidia-docker](get-nvidia-docker/index.md)
+* [get-openssl](get-openssl/index.md)
+* [get-rclone](get-rclone/index.md)
+* [get-sys-utils-cm](get-sys-utils-cm/index.md)
+* [get-sys-utils-min](get-sys-utils-min/index.md)
+* [get-xilinx-sdk](get-xilinx-sdk/index.md)
+* [get-zendnn](get-zendnn/index.md)
+* [install-bazel](install-bazel/index.md)
+* [install-cmake-prebuilt](install-cmake-prebuilt/index.md)
+* [install-gflags](install-gflags/index.md)
+* [install-github-cli](install-github-cli/index.md)
+* [install-intel-neural-speed-from-src](install-intel-neural-speed-from-src/index.md)
+* [install-numactl-from-src](install-numactl-from-src/index.md)
+* [install-openssl](install-openssl/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md
new file mode 100644
index 0000000000..d9dee3a52c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md
@@ -0,0 +1,90 @@
+# install-bazel
+Automatically generated README for this automation recipe: **install-bazel**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install script bazel" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,script,bazel
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install script bazel "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,script,bazel',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install script bazel"
+ ```
+___
+
+#### Versions
+Default version: `7.0.2`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-aarch64.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run-aarch64.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run.bat)
+___
+#### Script output
+```bash
+cmr "install script bazel " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md
new file mode 100644
index 0000000000..b02d49ed12
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md
@@ -0,0 +1,89 @@
+# install-cmake-prebuilt
+Automatically generated README for this automation recipe: **install-cmake-prebuilt**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cmake-prebuilt/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake"
+ ```
+___
+
+#### Versions
+Default version: `3.28.3`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cmake-prebuilt/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md
new file mode 100644
index 0000000000..adc3b09222
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md
@@ -0,0 +1,90 @@
+# install-gflags
+Automatically generated README for this automation recipe: **install-gflags**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gflags/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src get gflags" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,get,gflags
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src get gflags "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,src,get,gflags',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src get gflags"
+ ```
+___
+
+#### Versions
+Default version: `2.2.2`
+
+* `2.2.2`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gflags/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src get gflags " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md
new file mode 100644
index 0000000000..36276fc961
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md
@@ -0,0 +1,88 @@
+# install-github-cli
+Automatically generated README for this automation recipe: **install-github-cli**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install gh github cli github-cli" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,gh,github,cli,github-cli
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install gh github cli github-cli "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,gh,github,cli,github-cli',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install gh github cli github-cli"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run-macos.sh)
+ * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run-rhel.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install gh github cli github-cli " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md
new file mode 100644
index 0000000000..36266b6616
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md
@@ -0,0 +1,126 @@
+# Build Intel Neural Speed from sources
+Automatically generated README for this automation recipe: **install-intel-neural-speed-from-src**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-intel-neural-speed-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src from.src neural-speed intel-neural-speed" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,from.src,neural-speed,intel-neural-speed[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src from.src neural-speed intel-neural-speed [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,src,from.src,neural-speed,intel-neural-speed',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src from.src neural-speed intel-neural-speed[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_for-intel-mlperf-inference-v4.0-gptj`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/intel/neural-speed`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/intel/neural-speed`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/intel/neural-speed`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-intel-neural-speed-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src from.src neural-speed intel-neural-speed [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md
new file mode 100644
index 0000000000..6c2808bea9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md
@@ -0,0 +1,125 @@
+# Build numactl from sources
+Automatically generated README for this automation recipe: **install-numactl-from-src**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-numactl-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src from.src numactl src-numactl" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,from.src,numactl,src-numactl[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src from.src numactl src-numactl [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,src,from.src,numactl,src-numactl',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src from.src numactl src-numactl[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * **`_repo.https://github.com/numactl/numactl`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/numactl/numactl`
+
+
+
+
+ ##### Default variations
+
+ `_repo.https://github.com/numactl/numactl`
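+
+    Because only one variation from the "repo" group can be active at a time, a hypothetical build from a fork would replace the default repository like this:
+
+    ```bash
+    cm run script --tags=install,src,from.src,numactl,src-numactl,_repo.https://github.com/USERNAME/numactl
+    ```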
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-numactl-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src from.src numactl src-numactl [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md
new file mode 100644
index 0000000000..1e41f8cc60
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md
@@ -0,0 +1,90 @@
+# install-openssl
+Automatically generated README for this automation recipe: **install-openssl**
+
+Category: **[Detection or installation of tools and artifacts](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-openssl/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src openssl openssl-lib" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,openssl,openssl-lib
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src openssl openssl-lib "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,src,openssl,openssl-lib',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src openssl openssl-lib"
+ ```
+___
+
+#### Versions
+Default version: `1.1.1`
+
+* `1.1.1`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-openssl/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src openssl openssl-lib " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md
new file mode 100644
index 0000000000..0c940eff94
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md
@@ -0,0 +1,114 @@
+# benchmark-program
+Automatically generated README for this automation recipe: **benchmark-program**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "benchmark program" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=benchmark,program[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "benchmark program [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'benchmark,program',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "benchmark program[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_numactl`
+ * `_numactl-interleave`
+ * `_profile`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_ENABLE_NUMACTL: `0`
+ * CM_ENABLE_PROFILING: `0`
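+
+    As a sketch, the same keys can be supplied through an `@input.json` file instead of individual `--env` flags (the file contents below are a hypothetical example):
+
+    ```bash
+    echo '{"env": {"CM_ENABLE_PROFILING": "1"}}' > input.json
+    cm run script --tags=benchmark,program @input.json
+    ```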
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run-ubuntu.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run.bat)
+___
+#### Script output
+```bash
+cmr "benchmark program [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md
new file mode 100644
index 0000000000..51e8e7ece6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md
@@ -0,0 +1,97 @@
+# compile-program
+Automatically generated README for this automation recipe: **compile-program**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program',
+                      'out': 'con'
+                      # add any other input keys for this script here
+                      })
+
+    if r['return'] > 0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program"
+ ```
+___
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via an `env` dictionary in `@input.json`, or via script flags.
+
+ * SKIP_RECOMPILE: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/run.bat)
+___
+#### Script output
+```bash
+cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md
new file mode 100644
index 0000000000..30ee7342f9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md
@@ -0,0 +1,96 @@
+# convert-csv-to-md
+Automatically generated README for this automation recipe: **convert-csv-to-md**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "csv-to-md convert to-md from-csv" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=csv-to-md,convert,to-md,from-csv [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "csv-to-md convert to-md from-csv " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'csv-to-md,convert,to-md,from-csv',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "csv-to-md convert to-md from-csv" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--csv_file=value` → `CM_CSV_FILE=value`
+ * `--md_file=value` → `CM_MD_FILE=value`
+
+
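+    As a quick sketch, converting a hypothetical local CSV into Markdown (file names are placeholders):
+
+    ```bash
+    cmr "csv-to-md convert to-md from-csv" --csv_file=results.csv --md_file=results.md
+    ```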
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/run.bat)
+___
+#### Script output
+```bash
+cmr "csv-to-md convert to-md from-csv " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md
new file mode 100644
index 0000000000..3612d0dc59
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md
@@ -0,0 +1,98 @@
+# copy-to-clipboard
+Automatically generated README for this automation recipe: **copy-to-clipboard**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "copy to clipboard copy-to-clipboard" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=copy,to,clipboard,copy-to-clipboard [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "copy to clipboard copy-to-clipboard " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'copy,to,clipboard,copy-to-clipboard',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "copy to clipboard copy-to-clipboard" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--add_quotes=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value`
+ * `--q=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value`
+ * `--t=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value`
+ * `--text=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value`
+
+
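+    For instance, a hypothetical call that copies a quoted string to the clipboard:
+
+    ```bash
+    cmr "copy to clipboard copy-to-clipboard" --text="hello world" --add_quotes=yes
+    ```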
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/run.bat)
+___
+#### Script output
+```bash
+cmr "copy to clipboard copy-to-clipboard " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md
new file mode 100644
index 0000000000..bef5e7517a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md
@@ -0,0 +1,101 @@
+# create-conda-env
+Automatically generated README for this automation recipe: **create-conda-env**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-conda-env/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "create get env conda-env conda-environment create-conda-environment" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "create get env conda-env conda-environment create-conda-environment [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'create,get,env,conda-env,conda-environment,create-conda-environment',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "create get env conda-env conda-environment create-conda-environment[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_name.#`
+ - ENV variables:
+ - CM_CONDA_ENV_NAME: `#`
+
+
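+    For example, the `_name.#` variation can carry a hypothetical environment name:
+
+    ```bash
+    cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment,_name.myenv
+    ```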
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-conda-env/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "create get env conda-env conda-environment create-conda-environment [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md
new file mode 100644
index 0000000000..05d7c42799
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md
@@ -0,0 +1,92 @@
+# create-patch
+Automatically generated README for this automation recipe: **create-patch**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/create-patch/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/create-patch/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "create patch" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=create,patch [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "create patch " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'create,patch',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "create patch" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--exclude=value` → `CM_CREATE_PATCH_EXCLUDE=value`
+ * `--new=value` → `CM_CREATE_PATCH_NEW=value`
+ * `--old=value` → `CM_CREATE_PATCH_OLD=value`
+
+
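+    A minimal sketch comparing two hypothetical directories while excluding a build folder:
+
+    ```bash
+    cmr "create patch" --old=src-v1 --new=src-v2 --exclude=build
+    ```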
+
+
+___
+#### Script output
+```bash
+cmr "create patch " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md
new file mode 100644
index 0000000000..9bb3a47f05
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md
@@ -0,0 +1,86 @@
+# detect-sudo
+Automatically generated README for this automation recipe: **detect-sudo**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-sudo/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "detect sudo access" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=detect,sudo,access
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "detect sudo access "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'detect,sudo,access',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "detect sudo access"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-sudo/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "detect sudo access " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md
new file mode 100644
index 0000000000..1bb91aa3f1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md
@@ -0,0 +1,145 @@
+# download-and-extract
+Automatically generated README for this automation recipe: **download-and-extract**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/download-and-extract/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-and-extract/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "download-and-extract file" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=download-and-extract,file[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "download-and-extract file [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'download-and-extract,file',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "download-and-extract file[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_extract`
+ - ENV variables:
+ - CM_DAE_EXTRACT_DOWNLOADED: `yes`
+ * `_keep`
+ - ENV variables:
+ - CM_EXTRACT_REMOVE_EXTRACTED: `no`
+ * `_no-remove-extracted`
+ - ENV variables:
+ - CM_EXTRACT_REMOVE_EXTRACTED: `no`
+ * `_url.#`
+ - ENV variables:
+ - CM_DAE_URL: `#`
+
+
+
+
+ * Group "**download-tool**"
+
+
+ * **`_cmutil`** (default)
+ * `_curl`
+ * `_gdown`
+ * `_rclone`
+ * `_torrent`
+ - ENV variables:
+ - CM_DAE_DOWNLOAD_USING_TORRENT: `yes`
+ - CM_TORRENT_DOWNLOADED_FILE_NAME: `<<>>`
+ - CM_TORRENT_DOWNLOADED_PATH_ENV_KEY: `CM_DAE_FILEPATH`
+ - CM_TORRENT_WAIT_UNTIL_COMPLETED: `yes`
+ * `_wget`
+
+
+
+
+ ##### Default variations
+
+ `_cmutil`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+ * `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value`
+ * `--extract_path=value` → `CM_EXTRACT_PATH=value`
+ * `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value`
+ * `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value`
+ * `--store=value` → `CM_DOWNLOAD_PATH=value`
+ * `--to=value` → `CM_EXTRACT_PATH=value`
+ * `--url=value` → `CM_DAE_URL=value`
+ * `--verify=value` → `CM_VERIFY_SSL=value`
+
+
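+    For example, a hypothetical download via wget that extracts the archive and keeps it afterwards (URL and path are placeholders):
+
+    ```bash
+    cmr "download-and-extract file _wget _extract _keep" --url=https://example.com/data.tar.gz --to=./data
+    ```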
+
+
+___
+#### Script output
+```bash
+cmr "download-and-extract file [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md
new file mode 100644
index 0000000000..2899d49416
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md
@@ -0,0 +1,156 @@
+# download-file
+Automatically generated README for this automation recipe: **download-file**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "download file" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=download,file[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "download file [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'download,file',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "download file[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_url.#`
+ - ENV variables:
+ - CM_DOWNLOAD_URL: `#`
+
+
+
+
+ * Group "**download-tool**"
+
+
+ * **`_cmutil`** (default)
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `cmutil`
+ * `_curl`
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `curl`
+ * `_gdown`
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `gdown`
+ * `_rclone`
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `rclone`
+ * `_wget`
+ - ENV variables:
+ - CM_DOWNLOAD_TOOL: `wget`
+
+
+
+
+ ##### Default variations
+
+ `_cmutil`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
+ * `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value`
+ * `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value`
+ * `--md5sum=value` → `CM_DOWNLOAD_CHECKSUM=value`
+ * `--output_file=value` → `CM_DOWNLOAD_FILENAME=value`
+ * `--store=value` → `CM_DOWNLOAD_PATH=value`
+ * `--url=value` → `CM_DOWNLOAD_URL=value`
+ * `--verify=value` → `CM_VERIFY_SSL=value`
+ * `--verify_ssl=value` → `CM_VERIFY_SSL=value`
+
+
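+    For instance, a hypothetical wget download with a checksum verification (URL, file name and checksum are placeholders):
+
+    ```bash
+    cmr "download file _wget" --url=https://example.com/model.onnx --output_file=model.onnx --md5sum=<md5>
+    ```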
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_RCLONE_COPY_USING: `sync`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/run.bat)
+___
+#### Script output
+```bash
+cmr "download file [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md
new file mode 100644
index 0000000000..3d2aecbdba
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md
@@ -0,0 +1,120 @@
+# download-torrent
+Automatically generated README for this automation recipe: **download-torrent**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-torrent/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "download torrent download-torrent" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=download,torrent,download-torrent[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "download torrent download-torrent [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'download,torrent,download-torrent',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "download torrent download-torrent[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_torrent.#`
+ - ENV variables:
+ - CM_TORRENT_FILE: `#`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--wait=value` → `CM_TORRENT_WAIT_UNTIL_COMPLETED=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_TORRENT_WAIT_UNTIL_COMPLETED: `no`
+
+
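+    For example, a hypothetical run that blocks until the torrent completes (the torrent path is a placeholder):
+
+    ```bash
+    cmr "download torrent download-torrent _torrent.<path-to-file.torrent>" --wait=yes
+    ```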
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/download-torrent/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "download torrent download-torrent [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md
new file mode 100644
index 0000000000..c58463bb89
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md
@@ -0,0 +1,120 @@
+# extract-file
+Automatically generated README for this automation recipe: **extract-file**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "extract file" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=extract,file[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "extract file [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'extract,file',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "extract file[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_keep`
+ - ENV variables:
+ - CM_EXTRACT_REMOVE_EXTRACTED: `no`
+ * `_no-remove-extracted`
+ - ENV variables:
+ - CM_EXTRACT_REMOVE_EXTRACTED: `no`
+ * `_path.#`
+ - ENV variables:
+ - CM_EXTRACT_FILEPATH: `#`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value`
+ * `--extract_path=value` → `CM_EXTRACT_PATH=value`
+ * `--input=value` → `CM_EXTRACT_FILEPATH=value`
+ * `--to=value` → `CM_EXTRACT_PATH=value`
+
+
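+    A minimal sketch that extracts a hypothetical archive into a subfolder and keeps the original file:
+
+    ```bash
+    cmr "extract file _keep" --input=data.tar.gz --extra_folder=data
+    ```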
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/run.bat)
+___
+#### Script output
+```bash
+cmr "extract file [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md
new file mode 100644
index 0000000000..811924c373
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md
@@ -0,0 +1,96 @@
+# fail
+Automatically generated README for this automation recipe: **fail**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/fail/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/fail/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "fail filter" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=fail,filter[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "fail filter [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'fail,filter',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "fail filter[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_windows`
+ - ENV variables:
+ - CM_FAIL_WINDOWS: `True`
+
+
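+    For example, a hypothetical use of the `_windows` variation to trigger a failure only on Windows:
+
+    ```bash
+    cmr "fail filter _windows"
+    ```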
+
+
+___
+#### Script output
+```bash
+cmr "fail filter [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md
new file mode 100644
index 0000000000..904deffb89
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md
@@ -0,0 +1,115 @@
+# get-conda
+Automatically generated README for this automation recipe: **get-conda**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get conda get-conda" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,conda,get-conda[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get conda get-conda [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,conda,get-conda',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get conda get-conda[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_name.#`
+ - ENV variables:
+ - CM_CONDA_PREFIX_NAME: `#`
+
+
+
+
+ * Group "**conda-python**"
+
+
+ * `_python-3.#`
+ - ENV variables:
+ - CM_CONDA_PYTHON_VERSION: `3.#`
+ * `_python-3.8`
+ - ENV variables:
+ - CM_CONDA_PYTHON_VERSION: `3.8`
+
+
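+    For example, a hypothetical prefix name combined with a pinned Python version:
+
+    ```bash
+    cm run script --tags=get,conda,get-conda,_name.myenv,_python-3.8
+    ```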
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/run.bat)
+___
+#### Script output
+```bash
+cmr "get conda get-conda [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md
new file mode 100644
index 0000000000..8108b29159
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md
@@ -0,0 +1,187 @@
+# get-git-repo
+Automatically generated README for this automation recipe: **get-git-repo**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get git repo repository clone" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,git,repo,repository,clone[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get git repo repository clone [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,git,repo,repository,clone',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get git repo repository clone[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_lfs`
+ - ENV variables:
+ - CM_GIT_REPO_NEEDS_LFS: `yes`
+ * `_no-recurse-submodules`
+ - ENV variables:
+ - CM_GIT_RECURSE_SUBMODULES: ``
+ * `_patch`
+ - ENV variables:
+ - CM_GIT_PATCH: `yes`
+ * `_submodules.#`
+ - ENV variables:
+ - CM_GIT_SUBMODULES: `#`
+
+
+
+
+ * Group "**checkout**"
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_BRANCH: `#`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**git-history**"
+
+
+ * `_full-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: ``
+ * **`_short-history`** (default)
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 5`
+
+
+
+
+ * Group "**repo**"
+
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+
+
+
+
+ ##### Default variations
+
+ `_short-history`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--branch=value` → `CM_GIT_CHECKOUT=value`
+ * `--depth=value` → `CM_GIT_DEPTH=value`
+ * `--env_key=value` → `CM_GIT_ENV_KEY=value`
+ * `--folder=value` → `CM_GIT_CHECKOUT_FOLDER=value`
+ * `--patch=value` → `CM_GIT_PATCH=value`
+ * `--pull=value` → `CM_GIT_REPO_PULL=value`
+ * `--submodules=value` → `CM_GIT_RECURSE_SUBMODULES=value`
+ * `--update=value` → `CM_GIT_REPO_PULL=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_GIT_DEPTH: `--depth 4`
+ * CM_GIT_CHECKOUT_FOLDER: `repo`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules`
+ * CM_GIT_URL: `https://github.com/mlcommons/ck.git`
+
+
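+    For instance, a hypothetical full-history clone of a chosen branch into a named folder:
+
+    ```bash
+    cmr "get git repo repository clone _repo.https://github.com/mlcommons/ck _full-history" --branch=master --folder=ck
+    ```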
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/run.bat)
+___
+#### Script output
+```bash
+cmr "get git repo repository clone [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md
new file mode 100644
index 0000000000..06d0a33f03
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md
@@ -0,0 +1,86 @@
+# get-github-cli
+Automatically generated README for this automation recipe: **get-github-cli**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get gh gh-cli github cli github-cli" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,gh,gh-cli,github,cli,github-cli
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get gh gh-cli github cli github-cli "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,gh,gh-cli,github,cli,github-cli',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get gh gh-cli github cli github-cli"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/run.bat)
+___
+#### Script output
+```bash
+cmr "get gh gh-cli github cli github-cli " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md
new file mode 100644
index 0000000000..94dd95d37e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md
@@ -0,0 +1,22 @@
+* [benchmark-program](benchmark-program/index.md)
+* [compile-program](compile-program/index.md)
+* [convert-csv-to-md](convert-csv-to-md/index.md)
+* [copy-to-clipboard](copy-to-clipboard/index.md)
+* [create-conda-env](create-conda-env/index.md)
+* [create-patch](create-patch/index.md)
+* [detect-sudo](detect-sudo/index.md)
+* [download-and-extract](download-and-extract/index.md)
+* [download-file](download-file/index.md)
+* [download-torrent](download-torrent/index.md)
+* [extract-file](extract-file/index.md)
+* [fail](fail/index.md)
+* [get-conda](get-conda/index.md)
+* [get-git-repo](get-git-repo/index.md)
+* [get-github-cli](get-github-cli/index.md)
+* [pull-git-repo](pull-git-repo/index.md)
+* [push-csv-to-spreadsheet](push-csv-to-spreadsheet/index.md)
+* [set-device-settings-qaic](set-device-settings-qaic/index.md)
+* [set-echo-off-win](set-echo-off-win/index.md)
+* [set-performance-mode](set-performance-mode/index.md)
+* [set-sqlite-dir](set-sqlite-dir/index.md)
+* [tar-my-folder](tar-my-folder/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md
new file mode 100644
index 0000000000..e600e5129c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md
@@ -0,0 +1,95 @@
+# pull-git-repo
+Automatically generated README for this automation recipe: **pull-git-repo**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/pull-git-repo/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "pull git repo repository" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=pull,git,repo,repository [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "pull git repo repository " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'pull,git,repo,repository',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "pull git repo repository" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--path=value` → `CM_GIT_CHECKOUT_PATH=value`
+
+
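+    For instance, pulling the latest changes in a hypothetical local checkout:
+
+    ```bash
+    cmr "pull git repo repository" --path=/path/to/checkout
+    ```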
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/pull-git-repo/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "pull git repo repository " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md
new file mode 100644
index 0000000000..1ea013cb08
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md
@@ -0,0 +1,107 @@
+# push-csv-to-spreadsheet
+Automatically generated README for this automation recipe: **push-csv-to-spreadsheet**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/push-csv-to-spreadsheet/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "push google-spreadsheet spreadsheet push-to-google-spreadsheet" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--csv_file=value` → `CM_CSV_FILE_PATH=value`
+ * `--sheet_name=value` → `CM_GOOGLE_SHEET_NAME=value`
+ * `--spreadsheet_id=value` → `CM_GOOGLE_SPREADSHEET_ID=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_GOOGLE_SPREADSHEET_ID: `1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y`
+
+
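+    A hypothetical push of a local CSV into a named sheet of your own spreadsheet (all values are placeholders):
+
+    ```bash
+    cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet" --csv_file=results.csv --sheet_name=results --spreadsheet_id=<your-spreadsheet-id>
+    ```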
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/push-csv-to-spreadsheet/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md
new file mode 100644
index 0000000000..149675eddf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md
@@ -0,0 +1,114 @@
+# set-device-settings-qaic
+Automatically generated README for this automation recipe: **set-device-settings-qaic**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-device-settings-qaic/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "set device qaic ai100 cloud performance power setting mode vc ecc" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "set device qaic ai100 cloud performance power setting mode vc ecc[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_ecc`
+ - ENV variables:
+ - CM_QAIC_ECC: `yes`
+ * `_vc.#`
+ - ENV variables:
+ - CM_QAIC_VC: `#`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_QAIC_DEVICES: `0`
+
+
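+    For example, a hypothetical call that enables ECC on the default device:
+
+    ```bash
+    cmr "set device qaic ai100 cloud performance power setting mode vc ecc _ecc"
+    ```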
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-device-settings-qaic/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md
new file mode 100644
index 0000000000..52ff14aa93
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md
@@ -0,0 +1,80 @@
+# set-echo-off-win
+Automatically generated README for this automation recipe: **set-echo-off-win**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-echo-off-win/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "set echo off win echo-off-win echo-off" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=set,echo,off,win,echo-off-win,echo-off
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "set echo off win echo-off-win echo-off "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'set,echo,off,win,echo-off-win,echo-off',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "set echo off win echo-off-win echo-off"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "set echo off win echo-off-win echo-off " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md
new file mode 100644
index 0000000000..3a1c6de335
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md
@@ -0,0 +1,139 @@
+# set-performance-mode
+Automatically generated README for this automation recipe: **set-performance-mode**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "set system performance power mode" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=set,system,performance,power,mode[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "set system performance power mode [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'set,system,performance,power,mode',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "set system performance power mode[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_reproducibility`
+ - ENV variables:
+ - CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE: `yes`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_SET_PERFORMANCE_MODE_OF: `cpu`
+
+
+
+
+ * Group "**performance-mode**"
+
+
+ * **`_performance`** (default)
+ - ENV variables:
+ - CM_SET_PERFORMANCE_MODE: `performance`
+
+
+
+
+ * Group "**power**"
+
+
+ * `_power`
+ - ENV variables:
+ - CM_SET_PERFORMANCE_MODE: `power`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_performance`
+
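+    For example, a hypothetical run that adds the reproducibility tweaks on top of the defaults:
+
+    ```bash
+    cm run script --tags=set,system,performance,power,mode,_reproducibility
+    ```
+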
+#### Native script being run
+=== "Linux/macOS"
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run-ubuntu.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run.bat)
+___
+#### Script output
+```bash
+cmr "set system performance power mode [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md
new file mode 100644
index 0000000000..69229f604c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md
@@ -0,0 +1,95 @@
+# set-sqlite-dir
+Automatically generated README for this automation recipe: **set-sqlite-dir**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "set sqlite dir sqlite-dir" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=set,sqlite,dir,sqlite-dir [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "set sqlite dir sqlite-dir " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'set,sqlite,dir,sqlite-dir',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "set sqlite dir sqlite-dir" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--path=value` → `CM_SQLITE_PATH=value`
+
+
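+    For instance, pointing the script at a hypothetical database directory:
+
+    ```bash
+    cmr "set sqlite dir sqlite-dir" --path=/tmp/sqlite
+    ```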
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/run.bat)
+___
+#### Script output
+```bash
+cmr "set sqlite dir sqlite-dir " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md
new file mode 100644
index 0000000000..91b8bcaf08
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md
@@ -0,0 +1,92 @@
+# tar-my-folder
+Automatically generated README for this automation recipe: **tar-my-folder**
+
+Category: **[DevOps automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/tar-my-folder/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/tar-my-folder/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run tar" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,tar [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run tar " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+    ```python
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'run,tar',
+                      'out':'con',
+                      # add other input keys for this script here
+                      })
+
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run tar" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input_dir=value` → `CM_TAR_INPUT_DIR=value`
+ * `--outfile=value` → `CM_TAR_OUTFILE=value`
+ * `--output_dir=value` → `CM_TAR_OUTPUT_DIR=value`
+
+
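+    For example, archiving one hypothetical directory into another:
+
+    ```bash
+    cmr "run tar" --input_dir=./results --outfile=results.tar --output_dir=./archives
+    ```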
+
+
+___
+#### Script output
+```bash
+cmr "run tar " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md
new file mode 100644
index 0000000000..979bdc8a1c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md
@@ -0,0 +1,120 @@
+# build-docker-image
+Automatically generated README for this automation recipe: **build-docker-image**
+
+Category: **[Docker automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "build docker image docker-image dockerimage" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=build,docker,image,docker-image,dockerimage [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "build docker image docker-image dockerimage " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'build,docker,image,docker-image,dockerimage',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "build docker image docker-image dockerimage" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--cache=value` → `CM_DOCKER_CACHE=value`
+ * `--cm_repo=value` → `CM_MLOPS_REPO=value`
+ * `--docker_os=value` → `CM_DOCKER_OS=value`
+ * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value`
+ * `--dockerfile=value` → `CM_DOCKERFILE_WITH_PATH=value`
+ * `--gh_token=value` → `CM_GH_TOKEN=value`
+ * `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value`
+ * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value`
+ * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value`
+ * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value`
+ * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value`
+ * `--push_image=value` → `CM_DOCKER_PUSH_IMAGE=value`
+ * `--real_run=value` → `CM_REAL_RUN=value`
+ * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value`
+
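+ For example, a sketch of building a locally tagged image (all flag values are illustrative, not defaults):
+
+ ```bash
+ cm run script --tags=build,docker,image,docker-image,dockerimage \
+ --dockerfile=$HOME/Dockerfile --image_name=my-app --image_tag=v1
+ ```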
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DOCKER_IMAGE_REPO: `local`
+ * CM_DOCKER_IMAGE_TAG: `latest`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/run.bat)
+___
+#### Script output
+```bash
+cmr "build docker image docker-image dockerimage " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md
new file mode 100644
index 0000000000..7e4ea3639b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md
@@ -0,0 +1,145 @@
+# build-dockerfile
+Automatically generated README for this automation recipe: **build-dockerfile**
+
+Category: **[Docker automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "build dockerfile" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=build,dockerfile[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "build dockerfile [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'build,dockerfile',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "build dockerfile[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_slim`
+ - ENV variables:
+ - CM_DOCKER_BUILD_SLIM: `yes`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--build=value` → `CM_BUILD_DOCKER_IMAGE=value`
+ * `--cache=value` → `CM_DOCKER_CACHE=value`
+ * `--cm_repo=value` → `CM_MLOPS_REPO=value`
+ * `--cm_repo_flags=value` → `CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=value`
+ * `--cm_repos=value` → `CM_DOCKER_EXTRA_CM_REPOS=value`
+ * `--comments=value` → `CM_DOCKER_RUN_COMMENTS=value`
+ * `--copy_files=value` → `CM_DOCKER_COPY_FILES=value`
+ * `--docker_base_image=value` → `CM_DOCKER_IMAGE_BASE=value`
+ * `--docker_os=value` → `CM_DOCKER_OS=value`
+ * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value`
+ * `--extra_sys_deps=value` → `CM_DOCKER_EXTRA_SYS_DEPS=value`
+ * `--fake_docker_deps=value` → `CM_DOCKER_FAKE_DEPS=value`
+ * `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value`
+ * `--file_path=value` → `CM_DOCKERFILE_WITH_PATH=value`
+ * `--gh_token=value` → `CM_GH_TOKEN=value`
+ * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value`
+ * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value`
+ * `--package_manager_update_cmd=value` → `CM_PACKAGE_MANAGER_UPDATE_CMD=value`
+ * `--pip_extra_flags=value` → `CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS=value`
+ * `--post_file=value` → `DOCKER_IMAGE_POST_FILE=value`
+ * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value`
+ * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value`
+ * `--push_image=value` → `CM_DOCKER_PUSH_IMAGE=value`
+ * `--real_run=value` → `CM_REAL_RUN=value`
+ * `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value`
+ * `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value`
+ * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value`
+ * `--skip_cm_sys_upgrade=value` → `CM_DOCKER_SKIP_CM_SYS_UPGRADE=value`
+
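+ For instance, a minimal sketch that generates a slim Ubuntu-based Dockerfile (the OS values are illustrative):
+
+ ```bash
+ cm run script --tags=build,dockerfile,_slim --docker_os=ubuntu --docker_os_version=22.04
+ ```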
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DOCKER_BUILD_SLIM: `no`
+ * CM_DOCKER_IMAGE_EOL: `\n` (a single newline character)
+ * CM_DOCKER_OS: `ubuntu`
+
+
+
+___
+#### Script output
+```bash
+cmr "build dockerfile [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md
new file mode 100644
index 0000000000..ec6c83374e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md
@@ -0,0 +1,4 @@
+* [build-docker-image](build-docker-image/index.md)
+* [build-dockerfile](build-dockerfile/index.md)
+* [prune-docker](prune-docker/index.md)
+* [run-docker-container](run-docker-container/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md
new file mode 100644
index 0000000000..e1025b409c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md
@@ -0,0 +1,86 @@
+# prune-docker
+Automatically generated README for this automation recipe: **prune-docker**
+
+Category: **[Docker automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "prune docker" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=prune,docker
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "prune docker "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'prune,docker',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "prune docker"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/run.bat)
+___
+#### Script output
+```bash
+cmr "prune docker " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md
new file mode 100644
index 0000000000..68266dfa59
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md
@@ -0,0 +1,130 @@
+# run-docker-container
+Automatically generated README for this automation recipe: **run-docker-container**
+
+Category: **[Docker automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-docker-container/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/run-docker-container/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run docker container" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,docker,container [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run docker container " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'run,docker,container',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run docker container" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--all_gpus=value` → `CM_DOCKER_ADD_ALL_GPUS=value`
+ * `--base=value` → `CM_DOCKER_IMAGE_BASE=value`
+ * `--cache=value` → `CM_DOCKER_CACHE=value`
+ * `--cm_repo=value` → `CM_MLOPS_REPO=value`
+ * `--detached=value` → `CM_DOCKER_DETACHED_MODE=value`
+ * `--device=value` → `CM_DOCKER_ADD_DEVICE=value`
+ * `--docker_image_base=value` → `CM_DOCKER_IMAGE_BASE=value`
+ * `--docker_os=value` → `CM_DOCKER_OS=value`
+ * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value`
+ * `--extra_run_args=value` → `CM_DOCKER_EXTRA_RUN_ARGS=value`
+ * `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value`
+ * `--gh_token=value` → `CM_GH_TOKEN=value`
+ * `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value`
+ * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value`
+ * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value`
+ * `--image_tag_extra=value` → `CM_DOCKER_IMAGE_TAG_EXTRA=value`
+ * `--interactive=value` → `CM_DOCKER_INTERACTIVE_MODE=value`
+ * `--it=value` → `CM_DOCKER_INTERACTIVE=value`
+ * `--mounts=value` → `CM_DOCKER_VOLUME_MOUNTS=value`
+ * `--num_gpus=value` → `CM_DOCKER_ADD_NUM_GPUS=value`
+ * `--pass_user_group=value` → `CM_DOCKER_PASS_USER_GROUP=value`
+ * `--port_maps=value` → `CM_DOCKER_PORT_MAPS=value`
+ * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value`
+ * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value`
+ * `--real_run=value` → `CM_REAL_RUN=value`
+ * `--recreate=value` → `CM_DOCKER_IMAGE_RECREATE=value`
+ * `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value`
+ * `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value`
+ * `--save_script=value` → `CM_DOCKER_SAVE_SCRIPT=value`
+ * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value`
+ * `--shm_size=value` → `CM_DOCKER_SHM_SIZE=value`
+
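+ As an illustration (the image name, tag, and mount path are hypothetical):
+
+ ```bash
+ cm run script --tags=run,docker,container \
+ --image_name=my-image --image_tag=latest --it=yes --mounts=$HOME/data:/data
+ ```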
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DOCKER_DETACHED_MODE: `yes`
+
+
+
+___
+#### Script output
+```bash
+cmr "run docker container " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md
new file mode 100644
index 0000000000..65f72b8c8d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md
@@ -0,0 +1,174 @@
+# gui
+Automatically generated README for this automation recipe: **gui**
+
+Category: **[GUI](..)**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+
+---
+
+This CM script provides a unified GUI to run CM scripts using the [Streamlit library](https://streamlit.io).
+
+If you want to run it in the cloud (Azure, AWS, GCP), you need to open a port and check that it is reachable from outside.
+
+By default, Streamlit uses port 8501, but you can change it as follows:
+
+```bash
+cm run script "cm gui" --port 80
+```
+
+If you have trouble accessing this port, you can use Python's built-in HTTP server to check whether the port is open:
+```bash
+python3 -m http.server 80
+```
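+
+For example, once that test server is running, you could check reachability from another machine with `curl` (a quick sanity check; the placeholder stands in for your instance's public IP):
+
+```bash
+curl http://YOUR_PUBLIC_IP:80
+```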
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "cm gui cm-gui script-gui cm-script-gui streamlit" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=cm,gui,cm-gui,script-gui,cm-script-gui,streamlit[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'cm,gui,cm-gui,script-gui,cm-script-gui,streamlit',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "cm gui cm-gui script-gui cm-script-gui streamlit[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**app**"
+
+
+ * `_chatgpt`
+ - ENV variables:
+ - CM_GUI_APP: `chatgpt`
+ * `_graph`
+ - ENV variables:
+ - CM_GUI_APP: `graph`
+ * `_main`
+ - ENV variables:
+ - CM_GUI_APP: `app`
+ * `_playground`
+ - ENV variables:
+ - CM_GUI_APP: `playground`
+
+
+
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**script:** script tags
+ * --**app:** gui app
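+
+ For example, to open the GUI for a specific automation recipe (the script tags are illustrative):
+
+ ```bash
+ cmr "cm gui" --script="run,mlperf,inference"
+ ```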
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--address=value` → `CM_GUI_ADDRESS=value`
+ * `--app=value` → `CM_GUI_APP=value`
+ * `--exp_key_c=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C=value`
+ * `--exp_key_s=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S=value`
+ * `--exp_key_x=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X=value`
+ * `--exp_key_y=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y=value`
+ * `--exp_max_results=value` → `CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS=value`
+ * `--exp_name=value` → `CM_GUI_GRAPH_EXPERIMENT_NAME=value`
+ * `--exp_tags=value` → `CM_GUI_GRAPH_EXPERIMENT_TAGS=value`
+ * `--exp_title=value` → `CM_GUI_GRAPH_EXPERIMENT_TITLE=value`
+ * `--exp_uid=value` → `CM_GUI_GRAPH_EXPERIMENT_RESULT_UID=value`
+ * `--no_browser=value` → `CM_GUI_NO_BROWSER=value`
+ * `--no_run=value` → `CM_GUI_NO_RUN=value`
+ * `--port=value` → `CM_GUI_PORT=value`
+ * `--prefix=value` → `CM_GUI_SCRIPT_PREFIX_LINUX=value`
+ * `--script=value` → `CM_GUI_SCRIPT_TAGS=value`
+ * `--title=value` → `CM_GUI_TITLE=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_GUI_EXTRA_CMD: ``
+ * CM_GUI_SCRIPT_PREFIX_LINUX: `gnome-terminal --`
+ * CM_GUI_APP: `app`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/run.bat)
+___
+#### Script output
+```bash
+cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md
new file mode 100644
index 0000000000..b30ad21816
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md
@@ -0,0 +1 @@
+* [gui](gui/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md
new file mode 100644
index 0000000000..4c43e6df21
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md
@@ -0,0 +1,86 @@
+# get-ck-repo-mlops
+Automatically generated README for this automation recipe: **get-ck-repo-mlops**
+
+Category: **[Legacy CK support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ck-repo mlops ck-repo-mlops" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ck-repo,mlops,ck-repo-mlops
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ck-repo mlops ck-repo-mlops "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,ck-repo,mlops,ck-repo-mlops',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ck-repo mlops ck-repo-mlops"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/run.bat)
+___
+#### Script output
+```bash
+cmr "get ck-repo mlops ck-repo-mlops " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md
new file mode 100644
index 0000000000..954ae2c2e6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md
@@ -0,0 +1,86 @@
+# get-ck
+Automatically generated README for this automation recipe: **get-ck**
+
+Category: **[Legacy CK support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ck ck-framework" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ck,ck-framework
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ck ck-framework "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,ck,ck-framework',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ck ck-framework"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/run.bat)
+___
+#### Script output
+```bash
+cmr "get ck ck-framework " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md
new file mode 100644
index 0000000000..7f099c797c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md
@@ -0,0 +1,2 @@
+* [get-ck](get-ck/index.md)
+* [get-ck-repo-mlops](get-ck-repo-mlops/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md
new file mode 100644
index 0000000000..50a57accee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md
@@ -0,0 +1,109 @@
+# add-custom-nvidia-system
+Automatically generated README for this automation recipe: **add-custom-nvidia-system**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "add custom system nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=add,custom,system,nvidia[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "add custom system nvidia [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'add,custom,system,nvidia',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "add custom system nvidia[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**code**"
+
+
+ * `_ctuning`
+ * `_custom`
+ * `_go`
+ * `_mlcommons`
+ * `_nvidia-only`
+
+
+
+#### Versions
+* `r2.1`
+* `r3.0`
+* `r3.1`
+* `r4.0`
+
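+For example, a sketch that registers a custom system description using the MLCommons code variation and a specific release (both choices are illustrative):
+
+```bash
+cm run script --tags=add,custom,system,nvidia,_mlcommons --version=r4.0
+```
+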
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "add custom system nvidia [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md
new file mode 100644
index 0000000000..471ff7f8d9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md
@@ -0,0 +1,192 @@
+# benchmark-any-mlperf-inference-implementation
+Automatically generated README for this automation recipe: **benchmark-any-mlperf-inference-implementation**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-any-mlperf-inference-implementation/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**implementation**"
+
+
+ * `_deepsparse`
+ - ENV variables:
+ - DIVISION: `open`
+ - IMPLEMENTATION: `deepsparse`
+ * `_intel`
+ - ENV variables:
+ - IMPLEMENTATION: `intel`
+ * `_mil`
+ - ENV variables:
+ - IMPLEMENTATION: `mil`
+ * `_nvidia`
+ - ENV variables:
+ - IMPLEMENTATION: `nvidia-original`
+ * `_qualcomm`
+ - ENV variables:
+ - IMPLEMENTATION: `qualcomm`
+ * `_reference`
+ - ENV variables:
+ - IMPLEMENTATION: `reference`
+ * `_tflite-cpp`
+ - ENV variables:
+ - IMPLEMENTATION: `tflite_cpp`
+
+
+
+
+ * Group "**power**"
+
+
+ * **`_performance-only`** (default)
+ * `_power`
+ - ENV variables:
+ - POWER: `True`
+
+
+
+
+ * Group "**sut**"
+
+
+ * `_aws-dl2q.24xlarge`
+ * `_macbookpro-m1`
+ - ENV variables:
+ - CATEGORY: `edge`
+ - DIVISION: `closed`
+ * `_mini`
+ * `_orin`
+ * `_orin.32g`
+ - ENV variables:
+ - CATEGORY: `edge`
+ - DIVISION: `closed`
+ * `_phoenix`
+ - ENV variables:
+ - CATEGORY: `edge`
+ - DIVISION: `closed`
+ * `_rb6`
+ * `_rpi4`
+ * `_sapphire-rapids.24c`
+ - ENV variables:
+ - CATEGORY: `edge`
+ - DIVISION: `closed`
+
+
+
+
+ ##### Default variations
+
+ `_performance-only`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--backends=value` → `BACKENDS=value`
+ * `--category=value` → `CATEGORY=value`
+ * `--devices=value` → `DEVICES=value`
+ * `--division=value` → `DIVISION=value`
+ * `--extra_args=value` → `EXTRA_ARGS=value`
+ * `--models=value` → `MODELS=value`
+ * `--power_server=value` → `POWER_SERVER=value`
+ * `--power_server_port=value` → `POWER_SERVER_PORT=value`
+
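+ For example, a hypothetical sweep of two models over one backend on CPU (all values are illustrative):
+
+ ```bash
+ cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models,_reference \
+ --models=resnet50,bert-99 --backends=onnxruntime --devices=cpu
+ ```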
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * DIVISION: `open`
+ * CATEGORY: `edge`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-template.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-any-mlperf-inference-implementation/run-template.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md
new file mode 100644
index 0000000000..a6c9522ceb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md
@@ -0,0 +1,164 @@
+# build-mlperf-inference-server-nvidia
+Automatically generated README for this automation recipe: **build-mlperf-inference-server-nvidia**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "build mlcommons mlperf inference inference-server server nvidia-harness nvidia[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**code**"
+
+
+ * **`_ctuning`** (default)
+ * `_custom`
+ * `_go`
+ * `_mlcommons`
+ * `_nvidia-only`
+
+
+
+
+ * Group "**device**"
+
+
+ * `_cpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ * **`_cuda`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cuda`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+ * `_inferentia`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `inferentia`
+
+
+
+
+ * Group "**version**"
+
+
+ * `_r4.0`
+
+
+
+
+ ##### Default variations
+
+ `_ctuning,_cuda`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--clean=value` → `CM_MAKE_CLEAN=value`
+ * `--custom_system=value` → `CM_CUSTOM_SYSTEM_NVIDIA=value`
+
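+ For example, a clean-rebuild sketch using the default `_ctuning,_cuda` variations:
+
+ ```bash
+ cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia --clean=yes
+ ```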
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MAKE_BUILD_COMMAND: `build`
+ * CM_MAKE_CLEAN: `no`
+ * CM_CUSTOM_SYSTEM_NVIDIA: `yes`
+
+
+#### Versions
+Default version: `r3.1`
+
+* `r2.1`
+* `r3.0`
+* `r3.1`
+* `r4.0`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md
new file mode 100644
index 0000000000..566e49acc4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md
@@ -0,0 +1,122 @@
+# generate-mlperf-inference-submission
+Automatically generated README for this automation recipe: **generate-mlperf-inference-submission**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-submission/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-submission/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--analyzer_settings_file=value` → `CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH=value`
+ * `--category=value` → `CM_MLPERF_SUBMISSION_CATEGORY=value`
+ * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+ * `--dashboard=value` → `CM_MLPERF_DASHBOARD=value`
+ * `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value`
+ * `--device=value` → `CM_MLPERF_DEVICE=value`
+ * `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value`
+ * `--duplicate=value` → `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--hw_notes_extra=value` → `CM_MLPERF_SUT_HW_NOTES_EXTRA=value`
+ * `--infer_scenario_results=value` → `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value`
+ * `--power_settings_file=value` → `CM_MLPERF_POWER_SETTINGS_FILE_PATH=value`
+ * `--preprocess=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value`
+ * `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value`
+ * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR_=value`
+ * `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value`
+ * `--run_style=value` → `CM_MLPERF_RUN_STYLE=value`
+ * `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value`
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+ * `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value`
+ * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value`
+
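+ A typical invocation might look like this (the directories and submitter name are illustrative):
+
+ ```bash
+ cm run script --tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission \
+ --results_dir=$HOME/mlperf_results --submission_dir=$HOME/mlperf_submission --submitter=MyOrg
+ ```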
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_RUN_MLPERF_ACCURACY: `on`
+ * CM_MLPERF_RUN_STYLE: `valid`
+
+
+
+___
+#### Script output
+```bash
+cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md
new file mode 100644
index 0000000000..c56840eb31
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md
@@ -0,0 +1,122 @@
+# generate-mlperf-inference-user-conf
+Automatically generated README for this automation recipe: **generate-mlperf-inference-user-conf**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-user-conf/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "generate mlperf inference user-conf inference-user-conf" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=generate,mlperf,inference,user-conf,inference-user-conf [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'generate,mlperf,inference,user-conf,inference-user-conf',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "generate mlperf inference user-conf inference-user-conf" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--num_threads=value` → `CM_NUM_THREADS=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--power=value` → `CM_MLPERF_POWER=value`
+ * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value`
+
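+ For instance, to prepare a user.conf for an Offline performance run (the target QPS is illustrative):
+
+ ```bash
+ cm run script --tags=generate,mlperf,inference,user-conf,inference-user-conf \
+ --scenario=Offline --mode=performance --target_qps=100
+ ```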
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_LOADGEN_MODE: `accuracy`
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_OUTPUT_FOLDER_NAME: `test_results`
+ * CM_MLPERF_RUN_STYLE: `test`
+ * CM_TEST_QUERY_COUNT: `10`
+ * CM_FAST_FACTOR: `5`
+ * CM_MLPERF_QUANTIZATION: `False`
+
+
+
+___
+#### Script output
+```bash
+cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md
new file mode 100644
index 0000000000..74555e5003
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md
@@ -0,0 +1,107 @@
+# generate-mlperf-tiny-report
+Automatically generated README for this automation recipe: **generate-mlperf-tiny-report**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "generate mlperf tiny mlperf-tiny report" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=generate,mlperf,tiny,mlperf-tiny,report [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "generate mlperf tiny mlperf-tiny report " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'generate,mlperf,tiny,mlperf-tiny,report',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "generate mlperf tiny mlperf-tiny report" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--repo_tags=value` → `CM_IMPORT_TINYMLPERF_REPO_TAGS=value`
+
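+ For example, importing results with the default repository tags made explicit:
+
+ ```bash
+ cmr "generate mlperf tiny mlperf-tiny report" --repo_tags=1.1-private
+ ```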
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_IMPORT_TINYMLPERF_REPO_TAGS: `1.1-private`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run_submission_checker.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/run_submission_checker.sh)
+=== "Windows"
+
+ * [run_submission_checker.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/run_submission_checker.bat)
+___
+#### Script output
+```bash
+cmr "generate mlperf tiny mlperf-tiny report " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md
new file mode 100644
index 0000000000..3f583de519
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md
@@ -0,0 +1,81 @@
+# generate-mlperf-tiny-submission
+Automatically generated README for this automation recipe: **generate-mlperf-tiny-submission**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-submission/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-submission/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md
new file mode 100644
index 0000000000..05f7576e24
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md
@@ -0,0 +1,165 @@
+# generate-nvidia-engine
+Automatically generated README for this automation recipe: **generate-nvidia-engine**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+
+---
+
+This CM script is in the draft stage.
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-nvidia-engine/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "generate engine mlperf inference nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=generate,engine,mlperf,inference,nvidia[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "generate engine mlperf inference nvidia [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'generate,engine,mlperf,inference,nvidia',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "generate engine mlperf inference nvidia[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_MODEL_BATCH_SIZE: `None`
+ * `_copy_streams.#`
+ - ENV variables:
+ - CM_GPU_COPY_STREAMS: `None`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+
+
+
+
+ * Group "**model**"
+
+
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_resnet50`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+
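+ For example, a draft-stage sketch that builds an engine for retinanet on CUDA (the output directory is illustrative):
+
+ ```bash
+ cm run script --tags=generate,engine,mlperf,inference,nvidia,_retinanet,_cuda --output_dir=$HOME/nvidia_engines
+ ```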
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+ * CM_LOADGEN_SCENARIO: `Offline`
+ * CM_GPU_COPY_STREAMS: `1`
+ * CM_TENSORRT_WORKSPACE_SIZE: `4194304`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-nvidia-engine/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "generate engine mlperf inference nvidia [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md
new file mode 100644
index 0000000000..4267c81467
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md
@@ -0,0 +1,117 @@
+# get-mlperf-inference-intel-scratch-space
+Automatically generated README for this automation recipe: **get-mlperf-inference-intel-scratch-space**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference intel scratch space" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,inference,intel,scratch,space[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf inference intel scratch space [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,inference,intel,scratch,space',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf inference intel scratch space[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**version**"
+
+
+ * `_version.#`
+ - ENV variables:
+ - CM_INTEL_SCRATCH_SPACE_VERSION: `#`
+ * **`_version.4_0`** (default)
+ - ENV variables:
+ - CM_INTEL_SCRATCH_SPACE_VERSION: `4_0`
+
+
+
+
+ ##### Default variations
+
+ `_version.4_0`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--scratch_path=value` → `MLPERF_INTEL_SCRATCH_PATH=value`
+
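+ For example (the scratch path is illustrative):
+
+ ```bash
+ cm run script --tags=get,mlperf,inference,intel,scratch,space,_version.4_0 --scratch_path=$HOME/intel_scratch
+ ```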
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/run.bat)
+___
+#### Script output
+```bash
+cmr "get mlperf inference intel scratch space [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md
new file mode 100644
index 0000000000..85084ac54b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md
@@ -0,0 +1,144 @@
+# get-mlperf-inference-loadgen
+Automatically generated README for this automation recipe: **get-mlperf-inference-loadgen**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get loadgen inference inference-loadgen mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,loadgen,inference,inference-loadgen,mlperf,mlcommons',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get loadgen inference inference-loadgen mlperf mlcommons[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_copy`
+ * `_custom-python`
+ - ENV variables:
+ - CM_TMP_USE_CUSTOM_PYTHON: `on`
+ * `_download`
+ - ENV variables:
+ - CM_DOWNLOAD_CHECKSUM: `af3f9525965b2c1acc348fb882a5bfd1`
+ - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES`
+ - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0`
+ - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v3.1`
+ - CM_VERIFY_SSL: `False`
+ * `_download_v3.1`
+ - ENV variables:
+ - CM_DOWNLOAD_CHECKSUM: `af3f9525965b2c1acc348fb882a5bfd1`
+ - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES`
+ - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0`
+ - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v3.1`
+ - CM_VERIFY_SSL: `False`
+ * `_download_v4.0`
+ - ENV variables:
+ - CM_DOWNLOAD_CHECKSUM: `b4d97525d9ad0539a64667f2a3ca20c5`
+ - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES`
+ - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0`
+ - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v4.0`
+ - CM_VERIFY_SSL: `False`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_SHARED_BUILD: `no`
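+
+ For example, a shared build can be requested by overriding this key (a sketch; the value is illustrative):
+
+ ```bash
+ cmr "get loadgen inference inference-loadgen mlperf mlcommons" --env.CM_SHARED_BUILD=yes
+ ```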
+
+
+#### Versions
+Default version: `master`
+
+* `custom`
+* `main`
+* `master`
+* `pybind_fix`
+* `r2.1`
+* `r3.0`
+* `r3.1`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/run.bat)
+___
+#### Script output
+```bash
+cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md
new file mode 100644
index 0000000000..ca4cb291c5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md
@@ -0,0 +1,105 @@
+# get-mlperf-inference-nvidia-common-code
+Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-common-code**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-common-code/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-common-code/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get nvidia mlperf inference common-code" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,nvidia,mlperf,inference,common-code[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get nvidia mlperf inference common-code [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,nvidia,mlperf,inference,common-code',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get nvidia mlperf inference common-code[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**repo-owner**"
+
+
+ * `_ctuning`
+ * `_custom`
+ * `_go`
+ * `_mlcommons`
+ * `_nvidia-only`
+
+
+
+#### Versions
+Default version: `r3.1`
+
+* `r2.1`
+* `r3.0`
+* `r3.1`
+* `r4.0`
+
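+For example, to pin the MLCommons code at a specific release (a sketch assuming the generic `--version` flag of CM scripts):
+
+```bash
+cmr "get nvidia mlperf inference common-code _mlcommons" --version=r4.0
+```
+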
+___
+#### Script output
+```bash
+cmr "get nvidia mlperf inference common-code [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md
new file mode 100644
index 0000000000..6f7fd5229d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md
@@ -0,0 +1,118 @@
+# get-mlperf-inference-nvidia-scratch-space
+Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-scratch-space**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference nvidia scratch space" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,inference,nvidia,scratch,space[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,inference,nvidia,scratch,space',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf inference nvidia scratch space[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**version**"
+
+
+ * `_version.#`
+ - ENV variables:
+ - CM_NVIDIA_SCRATCH_SPACE_VERSION: `#`
+ * **`_version.4_0`** (default)
+ - ENV variables:
+ - CM_NVIDIA_SCRATCH_SPACE_VERSION: `4_0`
+
+
+
+
+ ##### Default variations
+
+ `_version.4_0`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--scratch_path=value` → `CM_NVIDIA_MLPERF_SCRATCH_PATH=value`
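+
+ For example (the path below is purely illustrative):
+
+ ```bash
+ cmr "get mlperf inference nvidia scratch space" --scratch_path=/mnt/nvidia-scratch
+ ```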
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/run.bat)
+___
+#### Script output
+```bash
+cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md
new file mode 100644
index 0000000000..b67ffcfdad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md
@@ -0,0 +1,111 @@
+# get-mlperf-inference-results-dir
+Automatically generated README for this automation recipe: **get-mlperf-inference-results-dir**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results-dir/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference results dir directory" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,inference,results,dir,directory[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf inference results dir directory [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,inference,results,dir,directory',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf inference results dir directory[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**version**"
+
+
+ * `_version.#`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_RESULTS_VERSION: `#`
+ * **`_version.4_0`** (default)
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_RESULTS_VERSION: `4_0`
+
+
+
+
+ ##### Default variations
+
+ `_version.4_0`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value`
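+
+ For example (the path below is purely illustrative):
+
+ ```bash
+ cmr "get mlperf inference results dir directory" --results_dir=$HOME/mlperf_results
+ ```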
+
+
+
+
+___
+#### Script output
+```bash
+cmr "get mlperf inference results dir directory [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md
new file mode 100644
index 0000000000..9297150e26
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md
@@ -0,0 +1,132 @@
+# get-mlperf-inference-results
+Automatically generated README for this automation recipe: **get-mlperf-inference-results**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get results inference inference-results mlcommons mlperf" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get results inference inference-results mlcommons mlperf [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,results,inference,inference-results,mlcommons,mlperf',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get results inference inference-results mlcommons mlperf[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**source-repo**"
+
+
+ * `_ctuning`
+ - ENV variables:
+ - GITHUB_REPO_OWNER: `ctuning`
+ * `_custom`
+ - ENV variables:
+ - GITHUB_REPO_OWNER: `arjunsuresh`
+ * `_go`
+ - ENV variables:
+ - GITHUB_REPO_OWNER: `GATEOverflow`
+ * **`_mlcommons`** (default)
+ - ENV variables:
+ - GITHUB_REPO_OWNER: `mlcommons`
+ * `_nvidia-only`
+ - ENV variables:
+ - GITHUB_REPO_OWNER: `GATEOverflow`
+ - NVIDIA_ONLY: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_mlcommons`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT: `master`
+ * CM_GIT_DEPTH: `--depth 1`
+ * CM_GIT_PATCH: `no`
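+
+ For example, to pull the results from the GATEOverflow mirror with full git history (a sketch combining the `_go` variation with an env override):
+
+ ```bash
+ cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf,_go --env.CM_GIT_DEPTH=""
+ ```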
+
+
+#### Versions
+Default version: `v3.1`
+
+* `v2.1`
+* `v3.0`
+* `v3.1`
+* `v4.0`
+
+___
+#### Script output
+```bash
+cmr "get results inference inference-results mlcommons mlperf [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md
new file mode 100644
index 0000000000..44b4f4b4fb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md
@@ -0,0 +1,192 @@
+# get-mlperf-inference-src
+Automatically generated README for this automation recipe: **get-mlperf-inference-src**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src source inference inference-src inference-source mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,src,source,inference,inference-src,inference-source,mlperf,mlcommons',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src source inference inference-src inference-source mlperf mlcommons[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_3d-unet`
+ - ENV variables:
+ - CM_SUBMODULE_3D_UNET: `yes`
+ * `_deeplearningexamples`
+ - ENV variables:
+ - CM_SUBMODULE_DEEPLEARNINGEXAMPLES: `yes`
+ * `_deepsparse`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `deepsparse`
+ - CM_GIT_URL: `https://github.com/neuralmagic/inference`
+ - CM_MLPERF_LAST_RELEASE: `v4.0`
+ * `_gn`
+ - ENV variables:
+ - CM_SUBMODULE_GN: `yes`
+ * `_no-recurse-submodules`
+ - ENV variables:
+ - CM_GIT_RECURSE_SUBMODULES: ``
+ * `_nvidia-pycocotools`
+ - ENV variables:
+ - CM_GIT_PATCH_FILENAME: `coco.patch`
+ * `_octoml`
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/octoml/inference`
+ * `_openimages-nvidia-pycocotools`
+ - ENV variables:
+ - CM_GIT_PATCH_FILENAME: `openimages-pycocotools.patch`
+ * `_patch`
+ - ENV variables:
+ - CM_GIT_PATCH: `yes`
+ * `_pybind`
+ - ENV variables:
+ - CM_SUBMODULE_PYBIND: `yes`
+ * `_recurse-submodules`
+ - ENV variables:
+ - CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules`
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+ * `_submodules.#`
+ - ENV variables:
+ - CM_GIT_SUBMODULES: `#`
+
+
+
+
+ * Group "**checkout**"
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_SHA: `#`
+
+
+
+
+ * Group "**git-history**"
+
+
+ * `_full-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: ``
+ * **`_short-history`** (default)
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 10`
+
+
+
+
+ ##### Default variations
+
+ `_short-history`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT_FOLDER: `inference`
+ * CM_GIT_DEPTH: `--depth 4`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ``
+ * CM_GIT_URL: `https://github.com/mlcommons/inference.git`
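+
+ For example, to fetch a specific branch with full git history (the branch name is illustrative):
+
+ ```bash
+ cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons,_branch.master,_full-history
+ ```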
+
+
+#### Versions
+Default version: `master`
+
+* `custom`
+* `deepsparse`
+* `main`
+* `master`
+* `pybind_fix`
+* `r2.1`
+* `r3.0`
+* `r3.1`
+* `tvm`
+
+___
+#### Script output
+```bash
+cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md
new file mode 100644
index 0000000000..d6375c0dc0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md
@@ -0,0 +1,111 @@
+# get-mlperf-inference-submission-dir
+Automatically generated README for this automation recipe: **get-mlperf-inference-submission-dir**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-submission-dir/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference submission dir directory" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,inference,submission,dir,directory[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf inference submission dir directory [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,inference,submission,dir,directory',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf inference submission dir directory[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**version**"
+
+
+ * `_version.#`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: `#`
+ * **`_version.4_0`** (default)
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: `4_0`
+
+
+
+
+ ##### Default variations
+
+ `_version.4_0`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
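+
+ For example (the path below is purely illustrative):
+
+ ```bash
+ cmr "get mlperf inference submission dir directory" --submission_dir=$HOME/mlperf_submission
+ ```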
+
+
+
+
+___
+#### Script output
+```bash
+cmr "get mlperf inference submission dir directory [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md
new file mode 100644
index 0000000000..3aa4926b3f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md
@@ -0,0 +1,103 @@
+# get-mlperf-inference-sut-configs
+Automatically generated README for this automation recipe: **get-mlperf-inference-sut-configs**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-configs/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-configs/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf inference sut configs sut-configs" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,inference,sut,configs,sut-configs [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf inference sut configs sut-configs " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,inference,sut,configs,sut-configs',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf inference sut configs sut-configs" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--configs_git_url=value` → `CM_GIT_URL=value`
+ * `--repo_path=value` → `CM_SUT_CONFIGS_PATH=value`
+ * `--run_config=value` → `CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX=value`
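+
+ For example, to point the script at a local checkout of your SUT configs (the path is illustrative):
+
+ ```bash
+ cmr "get mlperf inference sut configs sut-configs" --repo_path=$HOME/my-sut-configs
+ ```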
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_SUT_CONFIGS_PATH: ``
+ * CM_GIT_URL: ``
+
+
+
+___
+#### Script output
+```bash
+cmr "get mlperf inference sut configs sut-configs " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md
new file mode 100644
index 0000000000..7082c8a803
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md
@@ -0,0 +1,100 @@
+# get-mlperf-inference-sut-description
+Automatically generated README for this automation recipe: **get-mlperf-inference-sut-description**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-description/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf sut description system-under-test system-description" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,sut,description,system-under-test,system-description [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf sut description system-under-test system-description " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,sut,description,system-under-test,system-description',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf sut description system-under-test system-description" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--name=value` → `CM_HW_NAME=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
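+
+ For example (both values are illustrative):
+
+ ```bash
+ cmr "get mlperf sut description system-under-test system-description" --name=my_server --submitter=MyOrg
+ ```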
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_SUT_DESC_CACHE: `no`
+
+
+
+___
+#### Script output
+```bash
+cmr "get mlperf sut description system-under-test system-description " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md
new file mode 100644
index 0000000000..ce64cb5101
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md
@@ -0,0 +1,81 @@
+# get-mlperf-logging
+Automatically generated README for this automation recipe: **get-mlperf-logging**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-logging/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-logging/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlperf logging mlperf-logging" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,mlperf,logging,mlperf-logging
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get mlperf logging mlperf-logging "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlperf,logging,mlperf-logging',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get mlperf logging mlperf-logging"
+ ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "get mlperf logging mlperf-logging " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md
new file mode 100644
index 0000000000..d3cfef35e8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md
@@ -0,0 +1,134 @@
+# get-mlperf-power-dev
+Automatically generated README for this automation recipe: **get-mlperf-power-dev**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-power-dev/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src source power power-dev mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src source power power-dev mlperf mlcommons [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,src,source,power,power-dev,mlperf,mlcommons',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src source power power-dev mlperf mlcommons[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**checkout**"
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**repo**"
+
+
+ * **`_mlcommons`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/mlcommons/power-dev.git`
+ * `_octoml`
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/octoml/power-dev.git`
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+
+
+
+
+ ##### Default variations
+
+ `_mlcommons`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_DEPTH: `--depth 1`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_CHECKOUT_FOLDER: `power-dev`
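+
+ For example, to check out a development branch instead of the default (the branch name is illustrative):
+
+ ```bash
+ cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons,_branch.dev
+ ```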
+
+
+
+___
+#### Script output
+```bash
+cmr "get src source power power-dev mlperf mlcommons [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md
new file mode 100644
index 0000000000..dab580fbdc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md
@@ -0,0 +1,99 @@
+# get-mlperf-tiny-eembc-energy-runner-src
+Automatically generated README for this automation recipe: **get-mlperf-tiny-eembc-energy-runner-src**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner"
+ ```
+___
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT: `main`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ``
+ * CM_GIT_URL: `https://github.com/eembc/energyrunner`
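+
+ For example, to track a different branch of the EEMBC EnergyRunner repository (a sketch; the branch name is illustrative):
+
+ ```bash
+ cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" --env.CM_GIT_CHECKOUT=develop
+ ```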
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat)
+___
+#### Script output
+```bash
+cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md
new file mode 100644
index 0000000000..9c76d468e7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md
@@ -0,0 +1,99 @@
+# get-mlperf-tiny-src
+Automatically generated README for this automation recipe: **get-mlperf-tiny-src**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons"
+ ```
+___
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT: `master`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ``
+ * CM_GIT_URL: `https://github.com/mlcommons/tiny.git`
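+
+ For example, to check out a different branch of the tiny repository (a sketch; the branch name is illustrative):
+
+ ```bash
+ cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" --env.CM_GIT_CHECKOUT=main
+ ```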
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/run.bat)
+___
+#### Script output
+```bash
+cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md
new file mode 100644
index 0000000000..3ee1a15ac1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md
@@ -0,0 +1,112 @@
+# get-mlperf-training-nvidia-code
+Automatically generated README for this automation recipe: **get-mlperf-training-nvidia-code**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-nvidia-code/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get nvidia mlperf training code training-code" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,nvidia,mlperf,training,code,training-code[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get nvidia mlperf training code training-code [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,nvidia,mlperf,training,code,training-code',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get nvidia mlperf training code training-code[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**repo-owner**"
+
+
+ * `_ctuning`
+ - ENV variables:
+ - CM_TMP_TRAINING_SRC: `ctuning`
+ * `_custom`
+ * **`_mlcommons`** (default)
+ - ENV variables:
+ - CM_TMP_TRAINING_SRC: `mlcommons`
+ * `_nvidia-only`
+ - ENV variables:
+ - CM_TMP_TRAINING_SRC: `GATEOverflow`
+
+
+
+
+ ##### Default variations
+
+ `_mlcommons`
+#### Versions
+Default version: `r3.0`
+
+* `r2.1`
+* `r3.0`
+* `r3.1`
+
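+For example, to select the cTuning fork at a pinned release (a sketch assuming the generic `--version` flag of CM scripts):
+
+```bash
+cmr "get nvidia mlperf training code training-code _ctuning" --version=r3.1
+```
+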
+___
+#### Script output
+```bash
+cmr "get nvidia mlperf training code training-code [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md
new file mode 100644
index 0000000000..ac0c7803e4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md
@@ -0,0 +1,181 @@
+# get-mlperf-training-src
+Automatically generated README for this automation recipe: **get-mlperf-training-src**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-src/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src source training training-src training-source mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src source training training-src training-source mlperf mlcommons [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,src,source,training,training-src,training-source,mlperf,mlcommons',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src source training training-src training-source mlperf mlcommons[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_no-recurse-submodules`
+ - ENV variables:
+ - CM_GIT_RECURSE_SUBMODULES: ``
+ * `_nvidia-retinanet`
+ - ENV variables:
+ - CM_GIT_PATCH_FILENAMES: `nvidia-retinanet.patch,cpu_load.patch`
+ * `_patch`
+ - ENV variables:
+ - CM_GIT_PATCH: `yes`
+
+
+
+
+ * Group "**checkout**"
+
+
+ * `_branch.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT: `#`
+ * `_sha.#`
+ - ENV variables:
+ - CM_GIT_SHA: `#`
+ * `_tag.#`
+ - ENV variables:
+ - CM_GIT_CHECKOUT_TAG: `#`
+
+
+
+
+ * Group "**git-history**"
+
+
+ * `_full-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: ``
+ * **`_short-history`** (default)
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 5`
+
+
+
+
+ * Group "**repo**"
+
+
+ * `_repo.#`
+ - ENV variables:
+ - CM_GIT_URL: `#`
+
+
+
+
+ * Group "**src**"
+
+
+ * **`_cknowledge`** (default)
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/cknowledge/training.git`
+ * `_mlcommons`
+ - ENV variables:
+ - CM_GIT_URL: `https://github.com/mlcommons/training.git`
+
+
+
+
+ ##### Default variations
+
+ `_cknowledge,_short-history`
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT: `master`
+ * CM_GIT_DEPTH: `--depth 4`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules`
+ * CM_GIT_CHECKOUT_FOLDER: `training`
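+
+ For example, to use the upstream MLCommons training repository with full git history (a sketch combining two documented variations):
+
+ ```bash
+ cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons,_mlcommons,_full-history
+ ```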
+
+
+#### Versions
+Default version: `master`
+
+* `custom`
+* `master`
+
+___
+#### Script output
+```bash
+cmr "get src source training training-src training-source mlperf mlcommons [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md
new file mode 100644
index 0000000000..8746bdac31
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md
@@ -0,0 +1,90 @@
+# get-nvidia-mitten
+Automatically generated README for this automation recipe: **get-nvidia-mitten**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get nvidia mitten nvidia-mitten" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,nvidia,mitten,nvidia-mitten
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get nvidia mitten nvidia-mitten "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,nvidia,mitten,nvidia-mitten',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get nvidia mitten nvidia-mitten"
+ ```
+___
+
+#### Versions
+Default version: `master`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/run.bat)
+___
+#### Script output
+```bash
+cmr "get nvidia mitten nvidia-mitten " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md
new file mode 100644
index 0000000000..f2c9e85c78
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md
@@ -0,0 +1,121 @@
+# get-spec-ptd
+Automatically generated README for this automation recipe: **get-spec-ptd**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" [--input_flags]
+ ```
+___
+
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * **--input:** Path to SPEC PTDaemon (optional)
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `CM_INPUT=value`
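+
+ For example, if PTDaemon is already available locally (the path is illustrative):
+
+ ```bash
+ cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" --input=$HOME/ptd/ptd-linux-x86
+ ```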
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_GIT_CHECKOUT: `main`
+ * CM_GIT_DEPTH: `--depth 1`
+ * CM_GIT_PATCH: `no`
+ * CM_GIT_RECURSE_SUBMODULES: ` `
+ * CM_GIT_URL: `https://github.com/mlcommons/power.git`
+
+
+#### Versions
+Default version: `main`
+
+* `custom`
+* `main`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md
new file mode 100644
index 0000000000..8beb80672c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md
@@ -0,0 +1,107 @@
+# import-mlperf-inference-to-experiment
+Automatically generated README for this automation recipe: **import-mlperf-inference-to-experiment**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-inference-to-experiment/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-inference-to-experiment/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "import mlperf inference mlperf-inference experiment 2experiment to-experiment[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_skip_checker`
+ - ENV variables:
+ - CM_SKIP_SUBMISSION_CHECKER: `True`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+ * `--target_repo=value` → `CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO=value`
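+
+ For example, to import results while skipping the submission checker (the submitter value is illustrative):
+
+ ```bash
+ cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment _skip_checker" --submitter=MyOrg
+ ```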
+
+
+
+
+___
+#### Script output
+```bash
+cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md
new file mode 100644
index 0000000000..ee0aa4edd2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md
@@ -0,0 +1,91 @@
+# import-mlperf-tiny-to-experiment
+Automatically generated README for this automation recipe: **import-mlperf-tiny-to-experiment**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-tiny-to-experiment/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-tiny-to-experiment/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--target_repo=value` → `CM_IMPORT_TINYMLPERF_TARGET_REPO=value`
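+
+ For example (hypothetical; the target path is a placeholder):
+
+ ```bash
+ cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" --target_repo=$HOME/mlperf_tiny_results
+ ```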
+
+
+
+
+___
+#### Script output
+```bash
+cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md
new file mode 100644
index 0000000000..edda35499e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md
@@ -0,0 +1,97 @@
+# import-mlperf-training-to-experiment
+Automatically generated README for this automation recipe: **import-mlperf-training-to-experiment**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "import mlperf training mlperf-training experiment 2experiment to-experiment" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "import mlperf training mlperf-training experiment 2experiment to-experiment" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--target_repo=value` → `CM_IMPORT_MLPERF_TRAINING_TARGET_REPO=value`
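+
+ A hypothetical invocation using the tag form of the CLI (the target path is a placeholder):
+
+ ```bash
+ cm run script --tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment \
+ --target_repo=$HOME/mlperf_training_results
+ ```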
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run_mlperf_logger.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md
new file mode 100644
index 0000000000..b4011e7ce7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md
@@ -0,0 +1,41 @@
+* [add-custom-nvidia-system](add-custom-nvidia-system/index.md)
+* [benchmark-any-mlperf-inference-implementation](benchmark-any-mlperf-inference-implementation/index.md)
+* [build-mlperf-inference-server-nvidia](build-mlperf-inference-server-nvidia/index.md)
+* [generate-mlperf-inference-submission](generate-mlperf-inference-submission/index.md)
+* [generate-mlperf-inference-user-conf](generate-mlperf-inference-user-conf/index.md)
+* [generate-mlperf-tiny-report](generate-mlperf-tiny-report/index.md)
+* [generate-mlperf-tiny-submission](generate-mlperf-tiny-submission/index.md)
+* [generate-nvidia-engine](generate-nvidia-engine/index.md)
+* [get-mlperf-inference-intel-scratch-space](get-mlperf-inference-intel-scratch-space/index.md)
+* [get-mlperf-inference-loadgen](get-mlperf-inference-loadgen/index.md)
+* [get-mlperf-inference-nvidia-common-code](get-mlperf-inference-nvidia-common-code/index.md)
+* [get-mlperf-inference-nvidia-scratch-space](get-mlperf-inference-nvidia-scratch-space/index.md)
+* [get-mlperf-inference-results](get-mlperf-inference-results/index.md)
+* [get-mlperf-inference-results-dir](get-mlperf-inference-results-dir/index.md)
+* [get-mlperf-inference-src](get-mlperf-inference-src/index.md)
+* [get-mlperf-inference-submission-dir](get-mlperf-inference-submission-dir/index.md)
+* [get-mlperf-inference-sut-configs](get-mlperf-inference-sut-configs/index.md)
+* [get-mlperf-inference-sut-description](get-mlperf-inference-sut-description/index.md)
+* [get-mlperf-logging](get-mlperf-logging/index.md)
+* [get-mlperf-power-dev](get-mlperf-power-dev/index.md)
+* [get-mlperf-tiny-eembc-energy-runner-src](get-mlperf-tiny-eembc-energy-runner-src/index.md)
+* [get-mlperf-tiny-src](get-mlperf-tiny-src/index.md)
+* [get-mlperf-training-nvidia-code](get-mlperf-training-nvidia-code/index.md)
+* [get-mlperf-training-src](get-mlperf-training-src/index.md)
+* [get-nvidia-mitten](get-nvidia-mitten/index.md)
+* [get-spec-ptd](get-spec-ptd/index.md)
+* [import-mlperf-inference-to-experiment](import-mlperf-inference-to-experiment/index.md)
+* [import-mlperf-tiny-to-experiment](import-mlperf-tiny-to-experiment/index.md)
+* [import-mlperf-training-to-experiment](import-mlperf-training-to-experiment/index.md)
+* [install-mlperf-logging-from-src](install-mlperf-logging-from-src/index.md)
+* [prepare-training-data-bert](prepare-training-data-bert/index.md)
+* [prepare-training-data-resnet](prepare-training-data-resnet/index.md)
+* [preprocess-mlperf-inference-submission](preprocess-mlperf-inference-submission/index.md)
+* [process-mlperf-accuracy](process-mlperf-accuracy/index.md)
+* [push-mlperf-inference-results-to-github](push-mlperf-inference-results-to-github/index.md)
+* [run-mlperf-inference-mobilenet-models](run-mlperf-inference-mobilenet-models/index.md)
+* [run-mlperf-inference-submission-checker](run-mlperf-inference-submission-checker/index.md)
+* [run-mlperf-power-client](run-mlperf-power-client/index.md)
+* [run-mlperf-power-server](run-mlperf-power-server/index.md)
+* [run-mlperf-training-submission-checker](run-mlperf-training-submission-checker/index.md)
+* [truncate-mlperf-inference-accuracy-log](truncate-mlperf-inference-accuracy-log/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md
new file mode 100644
index 0000000000..5b673d37df
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md
@@ -0,0 +1,89 @@
+# install-mlperf-logging-from-src
+Automatically generated README for this automation recipe: **install-mlperf-logging-from-src**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/install-mlperf-logging-from-src/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install mlperf logging from.src" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,mlperf,logging,from.src
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install mlperf logging from.src "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'install,mlperf,logging,from.src',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install mlperf logging from.src"
+ ```
+___
+
+#### Versions
+* `master`
+* `v3.1`
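+
+A specific version can presumably be pinned via the generic CM `--version` flag (a sketch, not verified for this script):
+
+```bash
+cm run script --tags=install,mlperf,logging,from.src --version=v3.1
+```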
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-mlperf-logging-from-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install mlperf logging from.src " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md
new file mode 100644
index 0000000000..9b3b8d1bc1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md
@@ -0,0 +1,120 @@
+# prepare-training-data-bert
+Automatically generated README for this automation recipe: **prepare-training-data-bert**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "prepare mlperf training data input bert" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=prepare,mlperf,training,data,input,bert[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "prepare mlperf training data input bert [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'prepare,mlperf,training,data,input,bert',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "prepare mlperf training data input bert[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**implementation**"
+
+
+ * **`_nvidia`** (default)
+ - ENV variables:
+ - CM_TMP_VARIATION: `nvidia`
+ * `_reference`
+ - ENV variables:
+ - CM_TMP_VARIATION: `reference`
+
+
+
+
+ ##### Default variations
+
+ `_nvidia`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--clean=value` → `CM_MLPERF_TRAINING_CLEAN_TFRECORDS=value`
+ * `--data_dir=value` → `CM_DATA_DIR=value`
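+
+ For example, a hypothetical run selecting the `_reference` implementation variation (the data path is a placeholder):
+
+ ```bash
+ cmr "prepare mlperf training data input bert _reference" --data_dir=$HOME/bert_training_data
+ ```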
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run-nvidia.sh)
+ * [run-reference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run-reference.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "prepare mlperf training data input bert [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md
new file mode 100644
index 0000000000..1f4f113471
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md
@@ -0,0 +1,129 @@
+# prepare-training-data-resnet
+Automatically generated README for this automation recipe: **prepare-training-data-resnet**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "prepare mlperf training data input resnet" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=prepare,mlperf,training,data,input,resnet[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "prepare mlperf training data input resnet [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'prepare,mlperf,training,data,input,resnet',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "prepare mlperf training data input resnet[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_mxnet.#`
+ - ENV variables:
+ - CM_MXNET_VERSION: `#`
+
+
+
+
+ * Group "**implementation**"
+
+
+ * **`_nvidia`** (default)
+ - ENV variables:
+ - CM_TMP_VARIATION: `nvidia`
+ * `_reference`
+ - ENV variables:
+ - CM_TMP_VARIATION: `reference`
+
+
+
+
+ ##### Default variations
+
+ `_nvidia`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--data_dir=value` → `CM_DATA_DIR=value`
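+
+ For example, a hypothetical run pinning an MXNet version via the wildcard `_mxnet.#` variation (the version and path are placeholders):
+
+ ```bash
+ cmr "prepare mlperf training data input resnet _mxnet.1.9.1" --data_dir=$HOME/resnet_training_data
+ ```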
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/run-nvidia.sh)
+ * [run-reference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/run-reference.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "prepare mlperf training data input resnet [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md
new file mode 100644
index 0000000000..79f70a3c08
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md
@@ -0,0 +1,96 @@
+# preprocess-mlperf-inference-submission
+Automatically generated README for this automation recipe: **preprocess-mlperf-inference-submission**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/preprocess-mlperf-inference-submission/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
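+
+ For example (hypothetical; the directory and submitter name are placeholders):
+
+ ```bash
+ cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" \
+ --submission_dir=$HOME/inference_submission_tree --submitter=MySubmitter
+ ```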
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/preprocess-mlperf-inference-submission/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md
new file mode 100644
index 0000000000..bd5afa8b22
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md
@@ -0,0 +1,177 @@
+# process-mlperf-accuracy
+Automatically generated README for this automation recipe: **process-mlperf-accuracy**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlperf mlcommons accuracy mlc process process-accuracy" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlperf mlcommons accuracy mlc process process-accuracy[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**coco-evaluation-tool**"
+
+
+ * **`_default-pycocotools`** (default)
+ * `_nvidia-pycocotools`
+
+
+
+
+ * Group "**dataset**"
+
+
+ * `_cnndm`
+ - ENV variables:
+ - CM_DATASET: `cnndm`
+ * `_coco2014`
+ - ENV variables:
+ - CM_DATASET: `coco2014`
+ * **`_imagenet`** (default)
+ - ENV variables:
+ - CM_DATASET: `imagenet`
+ * `_kits19`
+ - ENV variables:
+ - CM_DATASET: `kits19`
+ * `_librispeech`
+ - ENV variables:
+ - CM_DATASET: `librispeech`
+ * `_open-orca`
+ - ENV variables:
+ - CM_DATASET: `openorca`
+ * `_openimages`
+ - ENV variables:
+ - CM_DATASET: `openimages`
+ * `_squad`
+ - ENV variables:
+ - CM_DATASET: `squad`
+ * `_terabyte`
+ - ENV variables:
+ - CM_DATASET: `squad`
+
+
+
+
+ * Group "**precision**"
+
+
+ * `_float16`
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `float16`
+ * **`_float32`** (default)
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `float32`
+ * `_float64`
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `float64`
+ * `_int16`
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `int16`
+ * `_int32`
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `int32`
+ * `_int64`
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `int64`
+ * `_int8`
+ - ENV variables:
+ - CM_ACCURACY_DTYPE: `int8`
+
+
+
+
+ ##### Default variations
+
+ `_default-pycocotools,_float32,_imagenet`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--result_dir=value` → `CM_MLPERF_ACCURACY_RESULTS_DIR=value`
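+
+ For example, a hypothetical run combining the dataset and precision groups (the results path is a placeholder):
+
+ ```bash
+ cmr "run mlperf mlcommons accuracy mlc process process-accuracy _openimages _float16" \
+ --result_dir=$HOME/mlperf_results
+ ```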
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/run.bat)
+___
+#### Script output
+```bash
+cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md
new file mode 100644
index 0000000000..2f3245a0b3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md
@@ -0,0 +1,109 @@
+# push-mlperf-inference-results-to-github
+Automatically generated README for this automation recipe: **push-mlperf-inference-results-to-github**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/push-mlperf-inference-results-to-github/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "push mlperf mlperf-inference-results publish-results inference submission github" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=push,mlperf,mlperf-inference-results,publish-results,inference,submission,github [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'push,mlperf,mlperf-inference-results,publish-results,inference,submission,github',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "push mlperf mlperf-inference-results publish-results inference submission github" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--branch=value` → `CM_GIT_BRANCH=value`
+ * `--commit_message=value` → `CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE=value`
+ * `--repo_branch=value` → `CM_GIT_BRANCH=value`
+ * `--repo_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value`
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
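+
+ For example (hypothetical; the fork URL, branch and directory are placeholders):
+
+ ```bash
+ cmr "push mlperf mlperf-inference-results publish-results inference submission github" \
+ --repo_url=https://github.com/YOUR_USER/mlperf_inference_submissions_v4.0 \
+ --branch=main --commit_message="Results added" --submission_dir=$HOME/submission_tree
+ ```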
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_MLPERF_RESULTS_GIT_REPO_URL: `https://github.com/ctuning/mlperf_inference_submissions_v4.0`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/push-mlperf-inference-results-to-github/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md
new file mode 100644
index 0000000000..35bd027de2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md
@@ -0,0 +1,326 @@
+# run-mlperf-inference-mobilenet-models
+Automatically generated README for this automation recipe: **run-mlperf-inference-mobilenet-models**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+
+---
+
+## Set up
+
+We need the full ImageNet 2012 validation dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a direct URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+
+### Docker Setup (optional)
+
+CM commands are expected to run natively, but if you prefer not to modify the host system, you can run the command below to set up a Docker container.
+
+```bash
+cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
+--adr.compiler.tags=gcc \
+--docker_cm_repo=mlcommons@cm4mlops \
+--imagenet_path=$HOME/imagenet-2012-val \
+--results_dir=$HOME/mobilenet_results \
+--submission_dir=$HOME/inference_submission_3.1 \
+--docker_skip_run_cmd
+```
+
+This command builds a Docker container and gives you an interactive shell from which you can execute the CM run commands below.
+* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
+* `results_dir` and `submission_dir` are expected to be empty directories that will be populated by the Docker container.
+* `imagenet_path` should point to the ImageNet folder containing the 50,000 validation images.
+
+
+
+## Run Commands
+
+Since the runs can take many hours, you may want to install `screen` as follows if you are running remotely; omit `screen` from the commands below if you are running directly on the host system.
+```bash
+cmr "get generic-sys-util _screen"
+```
+### Default tflite
+
+
+#### Do a full accuracy run for all the models (can take almost a day)
+
+```bash
+screen cmr "run mobilenet-models _tflite _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Do a full performance run for all the models (can take almost a day)
+```bash
+screen cmr "run mobilenet-models _tflite _performance-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate README files for all the runs
+```bash
+cmr "run mobilenet-models _tflite _populate-readme" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate actual submission tree
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```bash
+cmr "generate inference submission" \
+--results_dir=$HOME/mobilenet_results/valid_results \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--clean \
+--infer_scenario_results=yes \
+--adr.compiler.tags=gcc --adr.inference-src.version=master \
+--run-checker \
+--submitter=cTuning \
+--hw_notes_extra="Result taken by NAME"
+```
+* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems)
+
+#### Push the results to GitHub repo
+
+First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL.
+```bash
+cmr "push github mlperf inference submission" \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+--commit_message="Mobilenet results added"
+```
+
+Finally, create a PR to the [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/).
+
+### Using ARMNN with NEON
+
+Follow the same procedure as above, but add `_armnn,_neon` to the tags for the first three experiment runs. For example:
+```bash
+cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since the results will go to different subfolders.
+
+### Using ARMNN with OpenCL
+Follow the same procedure as above, but add `_armnn,_opencl` to the tags for the first three experiment runs. For example:
+```bash
+cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since the results will go to different subfolders.
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-mobilenet-models/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mobilenet models image-classification mobilenet-models mlperf inference" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mobilenet models image-classification mobilenet-models mlperf inference[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_armnn`
+ - ENV variables:
+ - CM_MLPERF_USE_ARMNN_LIBRARY: `yes`
+ * `_neon`
+ - Aliases: `_use-neon`
+ - ENV variables:
+ - CM_MLPERF_USE_NEON: `yes`
+ * `_only-fp32`
+ - ENV variables:
+ - CM_MLPERF_RUN_INT8: `no`
+ * `_only-int8`
+ - ENV variables:
+ - CM_MLPERF_RUN_FP32: `no`
+ * `_opencl`
+ - ENV variables:
+ - CM_MLPERF_USE_OPENCL: `yes`
+
+
+
+
+ * Group "**base-framework**"
+
+
+ * **`_tflite`** (default)
+
+
+
+
+ * Group "**model-selection**"
+
+
+ * **`_all-models`** (default)
+ - ENV variables:
+ - CM_MLPERF_RUN_MOBILENETS: `yes`
+ - CM_MLPERF_RUN_EFFICIENTNETS: `yes`
+ * `_efficientnet`
+ - ENV variables:
+ - CM_MLPERF_RUN_EFFICIENTNETS: `yes`
+ * `_mobilenet`
+ - ENV variables:
+ - CM_MLPERF_RUN_MOBILENETS: `yes`
+
+
+
+
+ * Group "**optimization**"
+
+
+ * **`_tflite-default`** (default)
+ - ENV variables:
+ - CM_MLPERF_TFLITE_DEFAULT_MODE: `yes`
+
+
+
+
+ * Group "**run-mode**"
+
+
+ * `_accuracy-only`
+ - ENV variables:
+ - CM_MLPERF_FIND_PERFORMANCE_MODE: `no`
+ - CM_MLPERF_ACCURACY_MODE: `yes`
+ - CM_MLPERF_SUBMISSION_MODE: `no`
+ * `_find-performance`
+ - ENV variables:
+ - CM_MLPERF_FIND_PERFORMANCE_MODE: `yes`
+ - CM_MLPERF_SUBMISSION_MODE: `no`
+ * `_performance-only`
+ - ENV variables:
+ - CM_MLPERF_FIND_PERFORMANCE_MODE: `no`
+ - CM_MLPERF_PERFORMANCE_MODE: `yes`
+ - CM_MLPERF_SUBMISSION_MODE: `no`
+ * `_populate-readme`
+ - ENV variables:
+ - CM_MLPERF_FIND_PERFORMANCE_MODE: `no`
+ - CM_MLPERF_POPULATE_README: `yes`
+ * `_submission`
+ - ENV variables:
+ - CM_MLPERF_FIND_PERFORMANCE_MODE: `no`
+ - CM_MLPERF_SUBMISSION_MODE: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_all-models,_tflite,_tflite-default`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--find-performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value`
+ * `--imagenet_path=value` → `IMAGENET_PATH=value`
+ * `--no-rerun=value` → `CM_MLPERF_NO_RERUN=value`
+ * `--power=value` → `CM_MLPERF_POWER=value`
+ * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value`
+ * `--submission=value` → `CM_MLPERF_SUBMISSION_MODE=value`
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_MLPERF_RUN_MOBILENETS: `no`
+ * CM_MLPERF_RUN_EFFICIENTNETS: `no`
+ * CM_MLPERF_NO_RERUN: `no`
+ * CM_MLPERF_RUN_FP32: `yes`
+ * CM_MLPERF_RUN_INT8: `yes`
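+
+ For example, these defaults can presumably be overridden from the command line (a sketch; the results path is a placeholder):
+
+ ```bash
+ cmr "run mobilenet models image-classification mobilenet-models mlperf inference _mobilenet _only-fp32" \
+ --env.CM_MLPERF_NO_RERUN=yes --results_dir=$HOME/mobilenet_results
+ ```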
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-mobilenet-models/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md
new file mode 100644
index 0000000000..0231dcbd98
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md
@@ -0,0 +1,138 @@
+# run-mlperf-inference-submission-checker
+Automatically generated README for this automation recipe: **run-mlperf-inference-submission-checker**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_short-run`
+ - ENV variables:
+ - CM_MLPERF_SHORT_RUN: `yes`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value`
+ * `--extra_model_benchmark_map=value` → `CM_MLPERF_EXTRA_MODEL_MAPPING=value`
+ * `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--power=value` → `CM_MLPERF_POWER=value`
+ * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value`
+ * `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value`
+ * `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value`
+ * `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value`
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+ * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value`
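+
+ For example, a hypothetical short check of a submission tree (the paths and submitter name are placeholders):
+
+ ```bash
+ cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker _short-run" \
+ --submission_dir=$HOME/submission_tree --submitter=MySubmitter --tar=yes
+ ```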
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_MLPERF_SHORT_RUN: `no`
+
+
+#### Versions
+Default version: `master`
+
+* `master`
+* `r3.0`
+* `r3.1`
+* `r4.0`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/run.bat)
+___
+#### Script output
+```bash
+cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md
new file mode 100644
index 0000000000..657bf339f2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md
@@ -0,0 +1,119 @@
+# run-mlperf-power-client
+Automatically generated README for this automation recipe: **run-mlperf-power-client**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlc mlcommons mlperf power client power-client" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlc,mlcommons,mlperf,power,client,power-client [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlc mlcommons mlperf power client power-client " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlc,mlcommons,mlperf,power,client,power-client',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlc mlcommons mlperf power client power-client" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--loadgen_logs_dir=value` → `CM_MLPERF_LOADGEN_LOGS_DIR=value`
+ * `--log_dir=value` → `CM_MLPERF_POWER_LOG_DIR=value`
+ * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value`
+ * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value`
+ * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value`
+ * `--port=value` → `CM_MLPERF_POWER_SERVER_PORT=value`
+ * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value`
+ * `--run_cmd=value` → `CM_MLPERF_RUN_CMD=value`
+ * `--server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value`
+ * `--server_port=value` → `CM_MLPERF_POWER_SERVER_PORT=value`
+ * `--timestamp=value` → `CM_MLPERF_POWER_TIMESTAMP=value`
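+
+ For example, a hypothetical client invocation (the server address, port and run command are placeholders):
+
+ ```bash
+ cmr "run mlc mlcommons mlperf power client power-client" \
+ --power_server=192.168.0.20 --port=4950 --run_cmd="sleep 10" --log_dir=logs
+ ```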
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_MLPERF_POWER_LOG_DIR: `logs`
+ * CM_MLPERF_RUN_CMD: ``
+ * CM_MLPERF_POWER_SERVER_ADDRESS: `localhost`
+ * CM_MLPERF_POWER_NTP_SERVER: `time.google.com`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run mlc mlcommons mlperf power client power-client " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md
new file mode 100644
index 0000000000..be12a1dd37
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md
@@ -0,0 +1,116 @@
+# run-mlperf-power-server
+Automatically generated README for this automation recipe: **run-mlperf-power-server**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlc mlcommons mlperf power server power-server" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlc,mlcommons,mlperf,power,server,power-server [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlc mlcommons mlperf power server power-server " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlc,mlcommons,mlperf,power,server,power-server',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlc mlcommons mlperf power server power-server" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--device_port=value` → `CM_MLPERF_POWER_DEVICE_PORT=value`
+ * `--device_type=value` → `CM_MLPERF_POWER_DEVICE_TYPE=value`
+ * `--interface_flag=value` → `CM_MLPERF_POWER_INTERFACE_FLAG=value`
+ * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value`
+ * `--screen=value` → `CM_MLPERF_POWER_SERVER_USE_SCREEN=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_MLPERF_POWER_NTP_SERVER: `time.google.com`
+ * CM_MLPERF_POWER_INTERFACE_FLAG: ``
+ * CM_MLPERF_POWER_DEVICE_TYPE: `49`
+ * CM_MLPERF_POWER_SERVER_ADDRESS: `0.0.0.0`
+ * CM_MLPERF_POWER_SERVER_PORT: `4950`
+ * CM_MLPERF_POWER_DEVICE_PORT: `/dev/usbtmc0`
+ * CM_MLPERF_POWER_SERVER_USE_SCREEN: `no`
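+
+ For example, these defaults can presumably be overridden via the mapped flags (a sketch; the device port is a placeholder):
+
+ ```bash
+ cmr "run mlc mlcommons mlperf power server power-server" --device_port=/dev/ttyUSB0 --screen=yes
+ ```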
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/run.bat)
+___
+#### Script output
+```bash
+cmr "run mlc mlcommons mlperf power server power-server " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md
new file mode 100644
index 0000000000..863aeae5e1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md
@@ -0,0 +1,135 @@
+# run-mlperf-training-submission-checker
+Automatically generated README for this automation recipe: **run-mlperf-training-submission-checker**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-training-submission-checker/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_short-run`
+ - ENV variables:
+ - CM_MLPERF_SHORT_RUN: `yes`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value`
+ * `--input=value` → `CM_MLPERF_SUBMISSION_DIR=value`
+ * `--power=value` → `CM_MLPERF_POWER=value`
+ * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value`
+ * `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value`
+ * `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value`
+ * `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value`
+ * `--submission_dir=value` → `CM_MLPERF_SUBMISSION_DIR=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+ * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or via script flags.
+
+ * CM_MLPERF_SHORT_RUN: `no`
+
+
+#### Versions
+Default version: `master`
+
+* `master`
+* `r3.0`
+* `r3.1`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-training-submission-checker/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md
new file mode 100644
index 0000000000..d0921c6d5f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md
@@ -0,0 +1,98 @@
+# truncate-mlperf-inference-accuracy-log
+Automatically generated README for this automation recipe: **truncate-mlperf-inference-accuracy-log**
+
+Category: **[MLPerf benchmark support](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator',
+                   'out':'con',
+                   # ... other input keys for this script ...
+                   })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
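+
+ For example (hypothetical; the directory and submitter name are placeholders):
+
+ ```bash
+ cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" \
+ --submission_dir=$HOME/submission_tree --submitter=MySubmitter
+ ```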
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md
new file mode 100644
index 0000000000..981a09f076
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md
@@ -0,0 +1,138 @@
+# app-image-classification-onnx-py
+Automatically generated README for this automation recipe: **app-image-classification-onnx-py**
+
+Category: **[Modular AI/ML application pipeline](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "modular python app image-classification onnx" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=modular,python,app,image-classification,onnx[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "modular python app image-classification onnx [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'modular,python,app,image-classification,onnx',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "modular python app image-classification onnx[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**target**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - USE_CPU: `True`
+ * `_cuda`
+ - ENV variables:
+ - USE_CUDA: `True`
+
+
+
+
+ ##### Default variations
+
+ `_cpu`
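+
+    For example, a hypothetical run that selects the `_cuda` variation instead of the default `_cpu`:
+
+    ```bash
+    cmr "modular python app image-classification onnx _cuda"
+    ```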
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**input:** Path to JPEG image to classify
+ * --**output:** Output directory (optional)
+ * --**j:** Print JSON output
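+
+    For instance, a hypothetical invocation that classifies a local image and prints JSON output (the image path is a placeholder):
+
+    ```bash
+    cmr "modular python app image-classification onnx" --input=/path/to/image.jpg --output=. --j
+    ```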
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--input=value` → `CM_IMAGE=value`
+ * `--output=value` → `CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+
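+    As an illustration, one of these defaults could be overridden on the command line (the value is hypothetical):
+
+    ```bash
+    cmr "modular python app image-classification onnx" --env.CM_BATCH_SIZE=2
+    ```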
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/run.bat)
+___
+#### Script output
+```bash
+cmr "modular python app image-classification onnx [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md
new file mode 100644
index 0000000000..bdb43e6f00
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md
@@ -0,0 +1,98 @@
+# app-image-classification-tf-onnx-cpp
+Automatically generated README for this automation recipe: **app-image-classification-tf-onnx-cpp**
+
+Category: **[Modular AI/ML application pipeline](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app image-classification cpp tensorflow onnx" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,image-classification,cpp,tensorflow,onnx
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app image-classification cpp tensorflow onnx "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'app,image-classification,cpp,tensorflow,onnx',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app image-classification cpp tensorflow onnx"
+ ```
+___
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "app image-classification cpp tensorflow onnx " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md
new file mode 100644
index 0000000000..9c96b5ef2c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md
@@ -0,0 +1,113 @@
+# app-image-classification-torch-py
+Automatically generated README for this automation recipe: **app-image-classification-torch-py**
+
+Category: **[Modular AI/ML application pipeline](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app image-classification python torch" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,image-classification,python,torch[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app image-classification python torch [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'app,image-classification,python,torch',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app image-classification python torch[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_cuda`
+ - ENV variables:
+ - USE_CUDA: `yes`
+
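+    For example, a hypothetical run that enables the `_cuda` variation:
+
+    ```bash
+    cmr "app image-classification python torch _cuda"
+    ```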
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/run.bat)
+___
+#### Script output
+```bash
+cmr "app image-classification python torch [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md
new file mode 100644
index 0000000000..37f6b98a5a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md
@@ -0,0 +1,114 @@
+# app-image-classification-tvm-onnx-py
+Automatically generated README for this automation recipe: **app-image-classification-tvm-onnx-py**
+
+Category: **[Modular AI/ML application pipeline](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app image-classification python tvm-onnx" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,image-classification,python,tvm-onnx[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app image-classification python tvm-onnx [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'app,image-classification,python,tvm-onnx',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app image-classification python tvm-onnx[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_cuda`
+ - ENV variables:
+ - USE_CUDA: `yes`
+ * `_llvm`
+
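+    For example, a hypothetical run that enables the `_llvm` variation:
+
+    ```bash
+    cm run script --tags=app,image-classification,python,tvm-onnx,_llvm
+    ```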
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "app image-classification python tvm-onnx [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md
new file mode 100644
index 0000000000..af2093236b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md
@@ -0,0 +1,128 @@
+# app-stable-diffusion-onnx-py
+Automatically generated README for this automation recipe: **app-stable-diffusion-onnx-py**
+
+Category: **[Modular AI/ML application pipeline](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "modular python app stable-diffusion onnx" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=modular,python,app,stable-diffusion,onnx[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "modular python app stable-diffusion onnx [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'modular,python,app,stable-diffusion,onnx',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "modular python app stable-diffusion onnx[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**target**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - USE_CPU: `True`
+ - CM_DEVICE: `cpu`
+ * `_cuda`
+ - ENV variables:
+ - USE_CUDA: `True`
+ - CM_DEVICE: `cuda:0`
+
+
+
+
+ ##### Default variations
+
+ `_cpu`
+=== "Input Flags"
+
+
+ #### Input Flags
+
+    * --**text:** Text prompt used to generate the image
+ * --**output:** Output directory
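+
+    For instance, a hypothetical invocation that generates an image from a text prompt (the prompt and the output directory are placeholders):
+
+    ```bash
+    cmr "modular python app stable-diffusion onnx" --text="a painting of a lighthouse at sunset" --output=.
+    ```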
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--output=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT=value`
+ * `--text=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT=value`
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/run.bat)
+___
+#### Script output
+```bash
+cmr "modular python app stable-diffusion onnx [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md
new file mode 100644
index 0000000000..3de2f8ac67
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md
@@ -0,0 +1,5 @@
+* [app-image-classification-onnx-py](app-image-classification-onnx-py/index.md)
+* [app-image-classification-tf-onnx-cpp](app-image-classification-tf-onnx-cpp/index.md)
+* [app-image-classification-torch-py](app-image-classification-torch-py/index.md)
+* [app-image-classification-tvm-onnx-py](app-image-classification-tvm-onnx-py/index.md)
+* [app-stable-diffusion-onnx-py](app-stable-diffusion-onnx-py/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md
new file mode 100644
index 0000000000..b78e6db158
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md
@@ -0,0 +1,235 @@
+# app-mlperf-inference-dummy
+Automatically generated README for this automation recipe: **app-mlperf-inference-dummy**
+
+Category: **[Modular MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-dummy/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**backend**"
+
+
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+
+
+
+
+ * Group "**batch-size**"
+
+
+ * `_bs.#`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * `_offline`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+
+
+
+
+ * Group "**model**"
+
+
+ * `_bert-99`
+ - ENV variables:
+ - CM_MODEL: `bert-99`
+ - CM_SQUAD_ACCURACY_DTYPE: `float32`
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_MODEL: `bert-99.9`
+ * `_gptj-99`
+ - ENV variables:
+ - CM_MODEL: `gptj-99`
+ - CM_SQUAD_ACCURACY_DTYPE: `float32`
+ * `_gptj-99.9`
+ - ENV variables:
+ - CM_MODEL: `gptj-99.9`
+ * `_llama2-70b-99`
+ - ENV variables:
+ - CM_MODEL: `llama2-70b-99`
+ * `_llama2-70b-99.9`
+ - ENV variables:
+ - CM_MODEL: `llama2-70b-99.9`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+
+
+
+
+ * Group "**precision**"
+
+
+ * `_fp16`
+ * `_fp32`
+ * `_uint8`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_pytorch,_resnet50`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--results_repo=value` → `CM_MLPERF_INFERENCE_RESULTS_REPO=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
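+    As an illustration, a hypothetical short accuracy run that combines documented variations and flags (the values are placeholders):
+
+    ```bash
+    cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,_bert-99,_offline \
+        --mode=accuracy --count=10
+    ```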
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_MLPERF_LOADGEN_MODE: `performance`
+ * CM_SKIP_PREPROCESS_DATASET: `no`
+ * CM_SKIP_MODEL_DOWNLOAD: `no`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy_harness`
+ * CM_MLPERF_SKIP_RUN: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-dummy/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md
new file mode 100644
index 0000000000..6bab8f9099
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md
@@ -0,0 +1,347 @@
+# app-mlperf-inference-intel
+Automatically generated README for this automation recipe: **app-mlperf-inference-intel**
+
+Category: **[Modular MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_bs.#`
+ - ENV variables:
+ - ML_MLPERF_MODEL_BATCH_SIZE: `#`
+ * `_v3.1`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_CODE_VERSION: `v3.1`
+
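+    The `#` in a variation such as `_bs.#` stands for a user-chosen value; for example, a hypothetical run with a model batch size of 128:
+
+    ```bash
+    cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,_bs.128
+    ```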
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+
+
+
+
+ * Group "**framework**"
+
+
+ * **`_pytorch`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+ - CM_MLPERF_BACKEND_LIB_NAMESPEC: `pytorch`
+
+
+
+
+ * Group "**loadgen-batchsize**"
+
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * `_offline`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+
+
+
+
+ * Group "**model**"
+
+
+ * `_bert-99`
+ - ENV variables:
+ - CM_MODEL: `bert-99`
+ - CM_SQUAD_ACCURACY_DTYPE: `float32`
+ - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx`
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_MODEL: `bert-99.9`
+ - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx`
+ * `_gptj-99`
+ - ENV variables:
+ - CM_MODEL: `gptj-99`
+ - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
+ * `_gptj-99.9`
+ - ENV variables:
+ - CM_MODEL: `gptj-99.9`
+ - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ - dataset_imagenet_preprocessed_input_square_side: `224`
+ - ml_model_has_background_class: `YES`
+ - ml_model_image_height: `224`
+ - loadgen_buffer_size: `1024`
+ - loadgen_dataset_size: `50000`
+ - CM_BENCHMARK: `STANDALONE_CLASSIFICATION`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth`
+ - dataset_imagenet_preprocessed_input_square_side: `224`
+ - ml_model_image_height: `800`
+ - ml_model_image_width: `800`
+ - loadgen_buffer_size: `64`
+ - loadgen_dataset_size: `24576`
+ - CM_BENCHMARK: `STANDALONE_OBJECT_DETECTION`
+
+
+
+
+ * Group "**network-mode**"
+
+
+ * `_network-server`
+ - ENV variables:
+ - CM_MLPERF_NETWORK_RUN_MODE: `network-server`
+ * **`_standalone`** (default)
+ - ENV variables:
+ - CM_MLPERF_NETWORK_RUN_MODE: `standalone`
+
+
+
+
+ * Group "**network-run-mode**"
+
+
+ * `_network-client`
+ - ENV variables:
+ - CM_MLPERF_NETWORK_RUN_MODE: `network-client`
+
+
+
+
+ * Group "**power-mode**"
+
+
+ * `_maxn`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True`
+ * `_maxq`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True`
+
+
+
+
+ * Group "**precision**"
+
+
+ * `_fp32`
+ - ENV variables:
+ - CM_IMAGENET_ACCURACY_DTYPE: `float32`
+ * `_int4`
+ * `_uint8`
+
+
+
+
+ * Group "**run-mode**"
+
+
+ * `_build-harness`
+ - ENV variables:
+ - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `build_harness`
+ * `_calibration`
+ - ENV variables:
+ - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `calibration`
+ * **`_run-harness`** (default)
+ - ENV variables:
+ - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `run_harness`
+
+
+
+
+ * Group "**sut**"
+
+
+ * `_sapphire-rapids.112c`
+ - ENV variables:
+ - WARMUP: ` --warmup`
+ * `_sapphire-rapids.24c`
+
+
+
+
+ * Group "**version**"
+
+
+ * **`_v4.0`** (default)
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_CODE_VERSION: `v4.0`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_pytorch,_resnet50,_run-harness,_standalone,_v4.0`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+ * CM_FAST_COMPILATION: `yes`
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_MLPERF_LOADGEN_MODE: `performance`
+ * CM_SKIP_PREPROCESS_DATASET: `no`
+ * CM_SKIP_MODEL_DOWNLOAD: `no`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `intel`
+ * CM_MLPERF_SKIP_RUN: `no`
+ * verbosity: `1`
+ * loadgen_trigger_cold_run: `0`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run_bert_harness.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_bert_harness.sh)
+ * [run_gptj_harness_v3_1.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh)
+ * [run_gptj_harness_v4_0.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md
new file mode 100644
index 0000000000..b46ea46771
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md
@@ -0,0 +1,368 @@
+# app-mlperf-inference-qualcomm
+Automatically generated README for this automation recipe: **app-mlperf-inference-qualcomm**
+
+Category: **[Modular MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-qualcomm/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_activation-count.#`
+ - ENV variables:
+ - CM_MLPERF_QAIC_ACTIVATION_COUNT: `#`
+ * `_num-devices.4`
+ - ENV variables:
+ - CM_QAIC_DEVICES: `0,1,2,3`
+ * `_pro`
+ - ENV variables:
+ - qaic_queue_length: `10`
+
+
+
+
+ * Group "**batch-size**"
+
+
+ * `_bs.#`
+ - ENV variables:
+ - kilt_model_batch_size: `#`
+ * `_bs.0`
+ - ENV variables:
+ - kilt_model_batch_size: `1`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ - kilt_backend_type: `cpu`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+ - kilt_backend_type: `gpu`
+ * `_qaic`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `qaic`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `QAic`
+ - kilt_backend_type: `qaic`
+
+
+
+
+ * Group "**framework**"
+
+
+ * `_glow`
+ - ENV variables:
+ - device: `qaic`
+ - CM_MLPERF_BACKEND: `glow`
+ - CM_MLPERF_BACKEND_LIB_NAMESPEC: `QAic`
+ * **`_onnxruntime`** (default)
+ - ENV variables:
+ - device: `onnxrt`
+ - CM_MLPERF_BACKEND: `onnxruntime`
+ - CM_MLPERF_BACKEND_LIB_NAMESPEC: `onnxruntime`
+ * `_tensorrt`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tensorrt`
+ - device: `tensorrt`
+ - CM_MLPERF_BACKEND_NAME: `TensorRT`
+
+
+
+
+ * Group "**loadgen-batch-size**"
+
+
+ * `_loadgen-batch-size.#`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * `_offline`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+
+
+
+
+ * Group "**model**"
+
+
+ * `_bert-99`
+ - ENV variables:
+ - CM_MODEL: `bert-99`
+ - CM_SQUAD_ACCURACY_DTYPE: `float32`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx`
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_MODEL: `bert-99.9`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ - kilt_model_name: `resnet50`
+ - kilt_input_count: `1`
+ - kilt_output_count: `1`
+ - kilt_input_format: `FLOAT32,-1,224,224,3`
+ - kilt_output_format: `INT64,-1`
+ - dataset_imagenet_preprocessed_input_square_side: `224`
+ - ml_model_has_background_class: `YES`
+ - ml_model_image_height: `224`
+ - loadgen_buffer_size: `1024`
+ - loadgen_dataset_size: `50000`
+ - CM_BENCHMARK: `STANDALONE_CLASSIFICATION`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth`
+ - kilt_model_name: `retinanet`
+ - kilt_input_count: `1`
+ - kilt_model_max_detections: `600`
+ - kilt_output_count: `1`
+ - kilt_input_format: `FLOAT32,-1,3,800,800`
+ - kilt_output_format: `INT64,-1`
+ - dataset_imagenet_preprocessed_input_square_side: `224`
+ - ml_model_image_height: `800`
+ - ml_model_image_width: `800`
+ - loadgen_buffer_size: `64`
+ - loadgen_dataset_size: `24576`
+ - CM_BENCHMARK: `STANDALONE_OBJECT_DETECTION`
+
+
+
+
+ * Group "**nsp**"
+
+
+ * `_nsp.#`
+ * `_nsp.14`
+ * `_nsp.16`
+
+
+
+
+ * Group "**power-mode**"
+
+
+ * `_maxn`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True`
+ * `_maxq`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True`
+
+
+
+
+ * Group "**precision**"
+
+
+ * `_fp16`
+ * `_fp32`
+ - ENV variables:
+ - CM_IMAGENET_ACCURACY_DTYPE: `float32`
+ * `_uint8`
+
+
+
+
+ * Group "**run-mode**"
+
+
+ * `_network-client`
+ - ENV variables:
+ - CM_RUN_MODE: `network-client`
+ * `_network-server`
+ - ENV variables:
+ - CM_RUN_MODE: `network-server`
+ * **`_standalone`** (default)
+ - ENV variables:
+ - CM_RUN_MODE: `standalone`
+
+
+
+
+ * Group "**sut**"
+
+
+ * `_dl2q.24xlarge`
+ - ENV variables:
+ - CM_QAIC_DEVICES: `0,1,2,3,4,5,6,7`
+ - qaic_queue_length: `4`
+ * `_rb6`
+ - ENV variables:
+ - CM_QAIC_DEVICES: `0`
+ - qaic_queue_length: `6`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_onnxruntime,_resnet50,_standalone`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--devices=value` → `CM_QAIC_DEVICES=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
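+    For example, a hypothetical run restricted to two QAIC devices in the Offline scenario (the values are placeholders):
+
+    ```bash
+    cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt" \
+        --devices=0,1 --scenario=Offline
+    ```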
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+ * CM_FAST_COMPILATION: `yes`
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_MLPERF_LOADGEN_MODE: `performance`
+ * CM_SKIP_PREPROCESS_DATASET: `no`
+ * CM_SKIP_MODEL_DOWNLOAD: `no`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `kilt`
+ * CM_MLPERF_SKIP_RUN: `no`
+ * CM_KILT_REPO_URL: `https://github.com/GATEOverflow/kilt-mlperf`
+ * CM_QAIC_DEVICES: `0`
+ * kilt_max_wait_abs: `10000`
+ * verbosity: `0`
+ * loadgen_trigger_cold_run: `0`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-qualcomm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md
new file mode 100644
index 0000000000..9675eae169
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md
@@ -0,0 +1,3 @@
+* [app-mlperf-inference-dummy](app-mlperf-inference-dummy/index.md)
+* [app-mlperf-inference-intel](app-mlperf-inference-intel/index.md)
+* [app-mlperf-inference-qualcomm](app-mlperf-inference-qualcomm/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md
new file mode 100644
index 0000000000..1a08adcbe2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md
@@ -0,0 +1,213 @@
+# app-loadgen-generic-python
+Automatically generated README for this automation recipe: **app-loadgen-generic-python**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+Developers: [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "python app generic loadgen" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=python,app,generic,loadgen[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "python app generic loadgen [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'python,app,generic,loadgen',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "python app generic loadgen[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_cmc`
+ - ENV variables:
+ - CM_CUSTOM_MODEL_CMC: `True`
+ * `_huggingface`
+ - ENV variables:
+ - CM_CUSTOM_MODEL_SOURCE: `huggingface`
+ * `_model-stub.#`
+ - ENV variables:
+ - CM_ML_MODEL_STUB: `#`
+
+
+
+
+ * Group "**backend**"
+
+
+ * **`_onnxruntime`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `onnxruntime`
+ * `_pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ - CM_MLPERF_EXECUTION_PROVIDER: `CPUExecutionProvider`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_EXECUTION_PROVIDER: `CUDAExecutionProvider`
+
+
+
+
+ * Group "**models**"
+
+
+ * `_custom`
+ - ENV variables:
+ - CM_MODEL: `custom`
+ * `_resnet50`
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_onnxruntime`
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**modelpath:** Full path to file with model weights
+ * --**modelcodepath:** (for PyTorch models) Full path to file with model code and cmc.py
+ * --**modelcfgpath:** (for PyTorch models) Full path to JSON file with model cfg
+ * --**modelsamplepath:** (for PyTorch models) Full path to file with model sample in pickle format
+ * --**ep:** ONNX Execution provider
+ * --**scenario:** MLPerf LoadGen scenario
+    * --**samples:** Number of samples (default: *2*)
+ * --**runner:** MLPerf runner
+ * --**execmode:** MLPerf exec mode
+ * --**output_dir:** MLPerf output directory
+ * --**concurrency:** MLPerf concurrency
+ * --**intraop:** MLPerf intra op threads
+ * --**interop:** MLPerf inter op threads
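+
+    For instance, a hypothetical invocation that benchmarks a local ONNX model (the model path and the sample count are placeholders):
+
+    ```bash
+    cmr "python app generic loadgen" --modelpath=/path/to/model.onnx --scenario=Offline --samples=10
+    ```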
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--concurrency=value` → `CM_MLPERF_CONCURRENCY=value`
+ * `--ep=value` → `CM_MLPERF_EXECUTION_PROVIDER=value`
+ * `--execmode=value` → `CM_MLPERF_EXEC_MODE=value`
+ * `--interop=value` → `CM_MLPERF_INTEROP=value`
+ * `--intraop=value` → `CM_MLPERF_INTRAOP=value`
+ * `--loadgen_duration_sec=value` → `CM_MLPERF_LOADGEN_DURATION_SEC=value`
+ * `--loadgen_expected_qps=value` → `CM_MLPERF_LOADGEN_EXPECTED_QPS=value`
+ * `--modelcfg=value` → `CM_ML_MODEL_CFG=value`
+ * `--modelcfgpath=value` → `CM_ML_MODEL_CFG_WITH_PATH=value`
+ * `--modelcodepath=value` → `CM_ML_MODEL_CODE_WITH_PATH=value`
+ * `--modelpath=value` → `CM_ML_MODEL_FILE_WITH_PATH=value`
+ * `--modelsamplepath=value` → `CM_ML_MODEL_SAMPLE_WITH_PATH=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--runner=value` → `CM_MLPERF_RUNNER=value`
+ * `--samples=value` → `CM_MLPERF_LOADGEN_SAMPLES=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_EXECUTION_MODE: `parallel`
+ * CM_MLPERF_BACKEND: `onnxruntime`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/run.bat)
+___
+#### Script output
+```bash
+cmr "python app generic loadgen [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md
new file mode 100644
index 0000000000..9e10444516
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md
@@ -0,0 +1,236 @@
+# app-mlperf-inference-ctuning-cpp-tflite
+Automatically generated README for this automation recipe: **app-mlperf-inference-ctuning-cpp-tflite**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app mlperf inference tflite-cpp" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,mlperf,inference,tflite-cpp[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app mlperf inference tflite-cpp [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'app,mlperf,inference,tflite-cpp',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app mlperf inference tflite-cpp[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_armnn`
+ - ENV variables:
+ - CM_MLPERF_TFLITE_USE_ARMNN: `yes`
+ - CM_TMP_LINK_LIBS: `tensorflowlite,armnn`
+
+
+
+
+ * Group "**backend**"
+
+
+ * `_tf`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tf`
+ * **`_tflite`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tflite`
+ - CM_MLPERF_BACKEND_VERSION: `master`
+ - CM_TMP_LINK_LIBS: `tensorflowlite`
+ - CM_TMP_SRC_FOLDER: `src`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ * `_gpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+
+ * **`_singlestream`** (default)
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+
+
+
+
+ * Group "**model**"
+
+
+ * `_efficientnet`
+ - ENV variables:
+ - CM_MODEL: `efficientnet`
+ * `_mobilenet`
+ - ENV variables:
+ - CM_MODEL: `mobilenet`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+
+
+
+
+ * Group "**optimization-target**"
+
+
+ * `_use-neon`
+ - ENV variables:
+ - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `using_neon`
+ - CM_MLPERF_TFLITE_USE_NEON: `1`
+ * `_use-opencl`
+ - ENV variables:
+ - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `using_opencl`
+ - CM_MLPERF_TFLITE_USE_OPENCL: `1`
+
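+    For example, a hypothetical run that combines the `_armnn` variation with the `_use-neon` optimization target:
+
+    ```bash
+    cm run script --tags=app,mlperf,inference,tflite-cpp,_armnn,_use-neon
+    ```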
+
+
+
+ * Group "**precision**"
+
+
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_MLPERF_MODEL_PRECISION: `float32`
+ * `_int8`
+ - ENV variables:
+ - CM_DATASET_COMPRESSED: `on`
+ - CM_MLPERF_MODEL_PRECISION: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_DATASET_COMPRESSED: `on`
+ - CM_MLPERF_MODEL_PRECISION: `uint8`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_fp32,_resnet50,_singlestream,_tflite`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--compressed_dataset=value` → `CM_DATASET_COMPRESSED=value`
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+ * `--verbose=value` → `CM_VERBOSE=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_DATASET_COMPRESSED: `off`
+ * CM_DATASET_INPUT_SQUARE_SIDE: `224`
+ * CM_FAST_COMPILATION: `yes`
+ * CM_LOADGEN_BUFFER_SIZE: `1024`
+ * CM_MLPERF_LOADGEN_MODE: `accuracy`
+ * CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+ * CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: `0`
+ * CM_MLPERF_OUTPUT_DIR: `.`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `tflite_cpp`
+ * CM_MLPERF_TFLITE_USE_NEON: `0`
+ * CM_MLPERF_TFLITE_USE_OPENCL: `0`
+ * CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
+ * CM_ML_MODEL_NORMALIZE_DATA: `0`
+ * CM_ML_MODEL_SUBTRACT_MEANS: `1`
+ * CM_VERBOSE: `0`
+
+
+
+___
+#### Script output
+```bash
+cmr "app mlperf inference tflite-cpp [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md
new file mode 100644
index 0000000000..a2c71b5f5d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md
@@ -0,0 +1,204 @@
+# app-mlperf-inference-mlcommons-cpp
+Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-cpp**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+Developers: [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app mlcommons mlperf inference cpp" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,mlcommons,mlperf,inference,cpp[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'app,mlcommons,mlperf,inference,cpp',
+                      'out':'con',
+                      # ... other input keys for this script ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app mlcommons mlperf inference cpp[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**batch-size**"
+
+
+ * `_batch-size.#`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#`
+
+
+
+
+ * Group "**device**"
+
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+
+
+
+
+ * Group "**framework**"
+
+
+ * **`_onnxruntime`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `onnxruntime`
+ - CM_MLPERF_BACKEND_LIB_NAMESPEC: `onnxruntime`
+ * `_pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+ * `_tf`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tf`
+ * `_tflite`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tflite`
+ * `_tvm-onnx`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-onnx`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * **`_offline`** (default)
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+ - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `1`
+
+
+
+
+ * Group "**model**"
+
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_offline,_onnxruntime,_resnet50`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+ * CM_FAST_COMPILATION: `yes`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `cpp`
+
+
+
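+As a hypothetical illustration of combining the variation and flag syntax documented above (the output directory and values are placeholders):
+
+```bash
+# Illustrative only: explicit default variations, a small query count and a
+# hypothetical output directory
+cmr "app mlcommons mlperf inference cpp _resnet50 _cpu _offline" \
+    --count=100 --max_batchsize=8 --output_dir=./mlperf_out -j
+```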
+___
+#### Script output
+```bash
+cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md
new file mode 100644
index 0000000000..4206a8ee00
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md
@@ -0,0 +1,392 @@
+# app-mlperf-inference-mlcommons-python
+Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-python**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python/README-extra.md)
+
+
+---
+
+This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+to modularize the *Python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
+using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
+The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage).
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app vision language mlcommons mlperf inference reference ref" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'app,vision,language,mlcommons,mlperf,inference,reference,ref',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app vision language mlcommons mlperf inference reference ref[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ * `_3d-unet`
+ - ENV variables:
+ - CM_TMP_IGNORE_MLPERF_QUERY_COUNT: `True`
+ - CM_MLPERF_MODEL_SKIP_BATCHING: `True`
+ * `_beam_size.#`
+ - ENV variables:
+ - GPTJ_BEAM_SIZE: `#`
+ * `_bert`
+ - ENV variables:
+ - CM_MLPERF_MODEL_SKIP_BATCHING: `True`
+ * `_dlrm`
+ - ENV variables:
+ - CM_MLPERF_MODEL_SKIP_BATCHING: `True`
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * `_offline`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_r2.1_default`
+ - ENV variables:
+ - CM_RERUN: `yes`
+ - CM_SKIP_SYS_UTILS: `yes`
+ - CM_TEST_QUERY_COUNT: `100`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+
+
+
+
+ * Group "**batch-size**"
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#`
+
+
+
+
+ * Group "**device**"
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ - CUDA_VISIBLE_DEVICES: ``
+ - USE_CUDA: `False`
+ - USE_GPU: `False`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - USE_CUDA: `True`
+ - USE_GPU: `True`
+ * `_rocm`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `rocm`
+ - USE_GPU: `True`
+ * `_tpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `tpu`
+
+
+
+
+ * Group "**framework**"
+
+ * `_deepsparse`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `deepsparse`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_ncnn`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `ncnn`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ - CM_MLPERF_VISION_DATASET_OPTION: `imagenet_pytorch`
+ * **`_onnxruntime`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `onnxruntime`
+ * `_pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_ray`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `ray`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_tf`
+ - Aliases: `_tensorflow`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tf`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_tflite`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tflite`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ - CM_MLPERF_VISION_DATASET_OPTION: `imagenet_tflite_tpu`
+ * `_tvm-onnx`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-onnx`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_tvm-pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-pytorch`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ - CM_PREPROCESS_PYTORCH: `yes`
+ - MLPERF_TVM_TORCH_QUANTIZED_ENGINE: `qnnpack`
+ * `_tvm-tflite`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-tflite`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+
+
+
+
+ * Group "**implementation**"
+
+ * **`_python`** (default)
+ - ENV variables:
+ - CM_MLPERF_PYTHON: `yes`
+ - CM_MLPERF_IMPLEMENTATION: `reference`
+
+
+
+
+ * Group "**models**"
+
+ * `_3d-unet-99`
+ - ENV variables:
+ - CM_MODEL: `3d-unet-99`
+ * `_3d-unet-99.9`
+ - ENV variables:
+ - CM_MODEL: `3d-unet-99.9`
+ * `_bert-99`
+ - ENV variables:
+ - CM_MODEL: `bert-99`
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_MODEL: `bert-99.9`
+ * `_dlrm-99`
+ - ENV variables:
+ - CM_MODEL: `dlrm-99`
+ * `_dlrm-99.9`
+ - ENV variables:
+ - CM_MODEL: `dlrm-99.9`
+ * `_gptj-99`
+ - ENV variables:
+ - CM_MODEL: `gptj-99`
+ * `_gptj-99.9`
+ - ENV variables:
+ - CM_MODEL: `gptj-99.9`
+ * `_llama2-70b-99`
+ - ENV variables:
+ - CM_MODEL: `llama2-70b-99`
+ * `_llama2-70b-99.9`
+ - ENV variables:
+ - CM_MODEL: `llama2-70b-99.9`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ - CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: `yes`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+ - CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: `yes`
+ - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `1`
+ * `_rnnt`
+ - ENV variables:
+ - CM_MODEL: `rnnt`
+ - CM_MLPERF_MODEL_SKIP_BATCHING: `True`
+ - CM_TMP_IGNORE_MLPERF_QUERY_COUNT: `True`
+ * `_sdxl`
+ - ENV variables:
+ - CM_MODEL: `stable-diffusion-xl`
+ - CM_NUM_THREADS: `1`
+
+
+
+
+ * Group "**network**"
+
+ * `_network-lon`
+ - ENV variables:
+ - CM_NETWORK_LOADGEN: `lon`
+ - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `network_loadgen`
+ * `_network-sut`
+ - ENV variables:
+ - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `network_sut`
+ - CM_NETWORK_LOADGEN: `sut`
+
+
+
+
+ * Group "**precision**"
+
+ * `_bfloat16`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `False`
+ - CM_MLPERF_MODEL_PRECISION: `bfloat16`
+ * `_float16`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `False`
+ - CM_MLPERF_MODEL_PRECISION: `float16`
+ * **`_fp32`** (default)
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `False`
+ - CM_MLPERF_MODEL_PRECISION: `float32`
+ * `_int8`
+ - Aliases: `_quantized`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `True`
+ - CM_MLPERF_MODEL_PRECISION: `int8`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_fp32,_onnxruntime,_python,_resnet50`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--dataset=value` → `CM_MLPERF_VISION_DATASET_OPTION=value`
+ * `--dataset_args=value` → `CM_MLPERF_EXTRA_DATASET_ARGS=value`
+ * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--imagenet_path=value` → `IMAGENET_PATH=value`
+ * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--network=value` → `CM_NETWORK_LOADGEN=value`
+ * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value`
+ * `--num_threads=value` → `CM_NUM_THREADS=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--power=value` → `CM_MLPERF_POWER=value`
+ * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value`
+ * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value`
+ * `--threads=value` → `CM_NUM_THREADS=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_LOADGEN_MODE: `accuracy`
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_OUTPUT_FOLDER_NAME: `test_results`
+ * CM_MLPERF_RUN_STYLE: `test`
+ * CM_TEST_QUERY_COUNT: `10`
+ * CM_MLPERF_QUANTIZATION: `False`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference`
+ * CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ``
+
+
+
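+As a hypothetical end-to-end illustration (variations and flags are taken from the lists above; the values are placeholders):
+
+```bash
+# Illustrative only: BERT-99 with the ONNX Runtime backend on CPU,
+# SingleStream scenario, short accuracy run
+cmr "app vision language mlcommons mlperf inference reference ref _bert-99 _onnxruntime _cpu _singlestream" \
+    --mode=accuracy --test_query_count=10 --output_dir=./results -j
+```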
+___
+#### Script output
+```bash
+cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md
new file mode 100644
index 0000000000..aefdce10cc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md
@@ -0,0 +1,488 @@
+# app-mlperf-inference
+Automatically generated README for this automation recipe: **app-mlperf-inference**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/README-extra.md)
+
+
+---
+
+This CM script provides a unified interface to prepare and run a modular version of the [MLPerf inference benchmark](https://arxiv.org/abs/1911.02549)
+across diverse ML models, data sets, frameworks, libraries, run-time systems and platforms
+using the [cross-platform automation meta-framework (MLCommons CM)](https://github.com/mlcommons/ck).
+
+It is assembled from reusable and interoperable [CM scripts for DevOps and MLOps](../list_of_scripts.md)
+being developed by the [open MLCommons taskforce on automation and reproducibility](../mlperf-education-workgroup.md).
+
+It is a higher-level wrapper around several other CM scripts that modularize the MLPerf inference benchmark:
+* [Reference Python implementation](../app-mlperf-inference-reference)
+* [Universal C++ implementation](../app-mlperf-inference-cpp)
+* [TFLite C++ implementation](../app-mlperf-inference-tflite-cpp)
+* [NVIDIA optimized implementation](../app-mlperf-inference-nvidia)
+
+See [this SCC tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md)
+for an example of using this script to run the reference (unoptimized) Python implementation of the MLPerf object detection benchmark
+with the RetinaNet model, the Open Images dataset, ONNX Runtime and a CPU target.
+
+See this [CM script](../run-mlperf-inference-app) to automate and validate your MLPerf inference submission.
+
+Get in touch with the [open taskforce on automation and reproducibility at MLCommons](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+if you need help with your submission, or if you would like to participate in the further modularization of MLPerf
+and in collaborative design-space exploration and optimization of ML systems.
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app vision language mlcommons mlperf inference generic" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'app,vision,language,mlcommons,mlperf,inference,generic',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app vision language mlcommons mlperf inference generic[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**implementation**"
+
+ * `_cpp`
+ - Aliases: `_mil,_mlcommons-cpp`
+ - ENV variables:
+ - CM_MLPERF_CPP: `yes`
+ - CM_MLPERF_IMPLEMENTATION: `mlcommons_cpp`
+ - CM_IMAGENET_ACCURACY_DTYPE: `float32`
+ - CM_OPENIMAGES_ACCURACY_DTYPE: `float32`
+ * `_intel-original`
+ - Aliases: `_intel`
+ - ENV variables:
+ - CM_MLPERF_IMPLEMENTATION: `intel`
+ * `_kilt`
+ - Aliases: `_qualcomm`
+ - ENV variables:
+ - CM_MLPERF_IMPLEMENTATION: `qualcomm`
+ * `_nvidia-original`
+ - Aliases: `_nvidia`
+ - ENV variables:
+ - CM_MLPERF_IMPLEMENTATION: `nvidia`
+ - CM_SQUAD_ACCURACY_DTYPE: `float16`
+ - CM_IMAGENET_ACCURACY_DTYPE: `int32`
+ - CM_CNNDM_ACCURACY_DTYPE: `int32`
+ - CM_LIBRISPEECH_ACCURACY_DTYPE: `int8`
+ * **`_reference`** (default)
+ - Aliases: `_mlcommons-python,_python`
+ - ENV variables:
+ - CM_MLPERF_PYTHON: `yes`
+ - CM_MLPERF_IMPLEMENTATION: `mlcommons_python`
+ - CM_SQUAD_ACCURACY_DTYPE: `float32`
+ - CM_IMAGENET_ACCURACY_DTYPE: `float32`
+ - CM_OPENIMAGES_ACCURACY_DTYPE: `float32`
+ - CM_LIBRISPEECH_ACCURACY_DTYPE: `float32`
+ - CM_CNNDM_ACCURACY_DTYPE: `int32`
+ * `_tflite-cpp`
+ - Aliases: `_ctuning-cpp-tflite`
+ - ENV variables:
+ - CM_MLPERF_TFLITE_CPP: `yes`
+ - CM_MLPERF_CPP: `yes`
+ - CM_MLPERF_IMPLEMENTATION: `ctuning_cpp_tflite`
+ - CM_IMAGENET_ACCURACY_DTYPE: `float32`
+
+
+
+
+ * Group "**backend**"
+
+ * `_deepsparse`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `deepsparse`
+ * `_glow`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `glow`
+ * `_ncnn`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `ncnn`
+ * `_onnxruntime`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `onnxruntime`
+ * `_pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+ * `_ray`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `ray`
+ * `_tensorrt`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tensorrt`
+ * `_tf`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tf`
+ * `_tflite`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tflite`
+ * `_tvm-onnx`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-onnx`
+ * `_tvm-pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-pytorch`
+ * `_tvm-tflite`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tvm-tflite`
+
+
+
+
+ * Group "**device**"
+
+ * **`_cpu`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ * `_cuda`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ * `_qaic`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `qaic`
+ * `_rocm`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `rocm`
+ * `_tpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `tpu`
+
+
+
+
+ * Group "**model**"
+
+ * `_3d-unet-99`
+ - ENV variables:
+ - CM_MODEL: `3d-unet-99`
+ * `_3d-unet-99.9`
+ - ENV variables:
+ - CM_MODEL: `3d-unet-99.9`
+ * `_bert-99`
+ - ENV variables:
+ - CM_MODEL: `bert-99`
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_MODEL: `bert-99.9`
+ * `_dlrm-v2-99`
+ - ENV variables:
+ - CM_MODEL: `dlrm-v2-99`
+ * `_dlrm-v2-99.9`
+ - ENV variables:
+ - CM_MODEL: `dlrm-v2-99.9`
+ * `_efficientnet`
+ - ENV variables:
+ - CM_MODEL: `efficientnet`
+ * `_gptj-99`
+ - ENV variables:
+ - CM_MODEL: `gptj-99`
+ * `_gptj-99.9`
+ - ENV variables:
+ - CM_MODEL: `gptj-99.9`
+ * `_llama2-70b-99`
+ - ENV variables:
+ - CM_MODEL: `llama2-70b-99`
+ * `_llama2-70b-99.9`
+ - ENV variables:
+ - CM_MODEL: `llama2-70b-99.9`
+ * `_mobilenet`
+ - ENV variables:
+ - CM_MODEL: `mobilenet`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+ * `_rnnt`
+ - ENV variables:
+ - CM_MODEL: `rnnt`
+ * `_sdxl`
+ - ENV variables:
+ - CM_MODEL: `stable-diffusion-xl`
+
+
+
+
+ * Group "**precision**"
+
+ * `_bfloat16`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `False`
+ - CM_MLPERF_MODEL_PRECISION: `float32`
+ * `_float16`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `False`
+ - CM_MLPERF_MODEL_PRECISION: `float32`
+ * **`_float32`** (default)
+ - Aliases: `_fp32`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `False`
+ - CM_MLPERF_MODEL_PRECISION: `float32`
+ * `_int4`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `True`
+ - CM_MLPERF_MODEL_PRECISION: `int4`
+ * `_int8`
+ - Aliases: `_quantized`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `True`
+ - CM_MLPERF_MODEL_PRECISION: `int8`
+ * `_uint8`
+ - ENV variables:
+ - CM_MLPERF_QUANTIZATION: `True`
+ - CM_MLPERF_MODEL_PRECISION: `uint8`
+
+
+
+
+ * Group "**execution-mode**"
+
+ * `_fast`
+ - ENV variables:
+ - CM_FAST_FACTOR: `5`
+ - CM_OUTPUT_FOLDER_NAME: `fast_results`
+ - CM_MLPERF_RUN_STYLE: `fast`
+ * **`_test`** (default)
+ - ENV variables:
+ - CM_OUTPUT_FOLDER_NAME: `test_results`
+ - CM_MLPERF_RUN_STYLE: `test`
+ * `_valid`
+ - ENV variables:
+ - CM_OUTPUT_FOLDER_NAME: `valid_results`
+ - CM_MLPERF_RUN_STYLE: `valid`
+
+
+
+
+ * Group "**reproducibility**"
+
+ * `_r2.1_default`
+ - ENV variables:
+ - CM_SKIP_SYS_UTILS: `yes`
+ - CM_TEST_QUERY_COUNT: `100`
+ * `_r3.0_default`
+ - ENV variables:
+ - CM_SKIP_SYS_UTILS: `yes`
+ * `_r3.1_default`
+ * `_r4.0_default`
+ - ENV variables:
+ - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: `/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl`
+ * `_r4.1_default`
+ - ENV variables:
+ - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: `/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl`
+
+
+
+
+ * *No group (any combination of variations can be selected)*
+
+ * `_power`
+ - ENV variables:
+ - CM_MLPERF_POWER: `yes`
+ - CM_SYSTEM_POWER: `yes`
+
+
+
+
+ * Group "**batch_size**"
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * **`_offline`** (default)
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+
+
+
+
+ ##### Default variations
+
+ `_cpu,_float32,_offline,_reference,_resnet50,_test`
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**scenario:** MLPerf inference scenario {Offline,Server,SingleStream,MultiStream} (*Offline*)
+ * --**mode:** MLPerf inference mode {performance,accuracy} (*accuracy*)
+ * --**test_query_count:** Specifies the number of samples to be processed during a test run
+ * --**target_qps:** Target QPS
+ * --**target_latency:** Target Latency
+ * --**max_batchsize:** Maximum batchsize to be used
+ * --**num_threads:** Number of CPU threads to launch the application with
+ * --**hw_name:** Valid value - any system description which has a config file (under same name) defined [here](https://github.com/mlcommons/cm4mlops/tree/main/script/get-configs-sut-mlperf-inference/configs)
+ * --**output_dir:** Location where the outputs are produced
+ * --**rerun:** Redo the run even if previous run files exist (*True*)
+ * --**regenerate_files:** Regenerates measurement files including accuracy.txt files even if a previous run exists. This option is redundant if `--rerun` is used
+ * --**adr.python.name:** Python virtual environment name (optional) (*mlperf*)
+ * --**adr.python.version_min:** Minimal Python version (*3.8*)
+ * --**adr.python.version:** Force Python version (must have all system deps)
+ * --**adr.compiler.tags:** Compiler for loadgen (*gcc*)
+ * --**adr.inference-src-loadgen.env.CM_GIT_URL:** Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)
+ * --**adr.inference-src.env.CM_GIT_URL:** Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)
+ * --**quiet:** Quiet run (select default values for all questions) (*False*)
+ * --**readme:** Generate README with the reproducibility report
+ * --**debug:** Debug MLPerf script
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value`
+ * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+ * `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--imagenet_path=value` → `IMAGENET_PATH=value`
+ * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value`
+ * `--num_threads=value` → `CM_NUM_THREADS=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--power=value` → `CM_MLPERF_POWER=value`
+ * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value`
+ * `--readme=value` → `CM_MLPERF_README=value`
+ * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_LOADGEN_MODE: `accuracy`
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_OUTPUT_FOLDER_NAME: `test_results`
+ * CM_MLPERF_RUN_STYLE: `test`
+ * CM_TEST_QUERY_COUNT: `10`
+ * CM_MLPERF_QUANTIZATION: `False`
+
+
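+A hypothetical invocation that spells out the default variations and demonstrates both flag styles (values are placeholders):
+
+```bash
+# Illustrative only: reference implementation, ResNet50, CPU, Offline, test mode;
+# --env.KEY=VALUE overrides a default environment key directly
+cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic,_reference,_resnet50,_cpu,_offline,_test \
+    --mode=performance --test_query_count=10 --env.CM_OUTPUT_FOLDER_NAME=demo_results --quiet
+```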
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md
new file mode 100644
index 0000000000..482ef2c01b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md
@@ -0,0 +1,100 @@
+# benchmark-program-mlperf
+Automatically generated README for this automation recipe: **benchmark-program-mlperf**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program-mlperf/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "mlperf benchmark-mlperf" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=mlperf,benchmark-mlperf[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "mlperf benchmark-mlperf [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'mlperf,benchmark-mlperf',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "mlperf benchmark-mlperf[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**power-mode**"
+
+ * **`_no-power`** (default)
+ * `_power`
+ - ENV variables:
+ - CM_MLPERF_POWER: `yes`
+
+
+
+
+ ##### Default variations
+
+ `_no-power`
+
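+For example, a hypothetical run with power measurement enabled via the `_power` variation (this assumes an MLPerf power measurement setup is available):
+
+```bash
+# Illustrative only: sets CM_MLPERF_POWER=yes; defaults to _no-power otherwise
+cmr "mlperf benchmark-mlperf _power" -j
+```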
+___
+#### Script output
+```bash
+cmr "mlperf benchmark-mlperf [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md
new file mode 100644
index 0000000000..01e67ecc51
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md
@@ -0,0 +1,7 @@
+* [app-loadgen-generic-python](app-loadgen-generic-python/index.md)
+* [app-mlperf-inference](app-mlperf-inference/index.md)
+* [app-mlperf-inference-ctuning-cpp-tflite](app-mlperf-inference-ctuning-cpp-tflite/index.md)
+* [app-mlperf-inference-mlcommons-cpp](app-mlperf-inference-mlcommons-cpp/index.md)
+* [app-mlperf-inference-mlcommons-python](app-mlperf-inference-mlcommons-python/index.md)
+* [benchmark-program-mlperf](benchmark-program-mlperf/index.md)
+* [run-mlperf-inference-app](run-mlperf-inference-app/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md
new file mode 100644
index 0000000000..c05c90c38c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md
@@ -0,0 +1,326 @@
+# run-mlperf-inference-app
+Automatically generated README for this automation recipe: **run-mlperf-inference-app**
+
+Category: **[Modular MLPerf inference benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run-mlperf,inference" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run-mlperf,inference[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run-mlperf,inference [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'run-mlperf,inference',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run-mlperf,inference[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ * `_all-scenarios`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_ALL_SCENARIOS: `yes`
+ * `_compliance`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_COMPLIANCE: `yes`
+ * `_dashboard`
+ - ENV variables:
+ - CM_MLPERF_DASHBOARD: `on`
+
+
+
+
+ * Group "**benchmark-version**"
+
+ * `_r2.1`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `2.1`
+ - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r2.1_default`
+ * `_r3.0`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `3.0`
+ - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r3.0_default`
+ * `_r3.1`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `3.1`
+ - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r3.1_default`
+ * `_r4.0`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `4.0`
+ - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r4.0_default`
+ * `_r4.1`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `4.1`
+ - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r4.1_default`
+
+
+
+
+ * Group "**mode**"
+
+ * `_all-modes`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_ALL_MODES: `yes`
+
+
+
+
+ * Group "**submission-generation**"
+
+ * `_accuracy-only`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_MODE: `accuracy`
+ - CM_MLPERF_SUBMISSION_RUN: `yes`
+ - CM_RUN_MLPERF_ACCURACY: `on`
+ - CM_RUN_SUBMISSION_CHECKER: `no`
+ * `_find-performance`
+ - ENV variables:
+ - CM_MLPERF_FIND_PERFORMANCE_MODE: `yes`
+ - CM_MLPERF_LOADGEN_ALL_MODES: `no`
+ - CM_MLPERF_LOADGEN_MODE: `performance`
+ - CM_MLPERF_RESULT_PUSH_TO_GITHUB: `False`
+ * **`_performance-and-accuracy`** (default)
+ * `_performance-only`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_MODE: `performance`
+ - CM_MLPERF_SUBMISSION_RUN: `yes`
+ - CM_RUN_SUBMISSION_CHECKER: `no`
+ * `_populate-readme`
+ - ENV variables:
+ - CM_MLPERF_README: `yes`
+ - CM_MLPERF_SUBMISSION_RUN: `yes`
+ - CM_RUN_SUBMISSION_CHECKER: `no`
+ * `_submission`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_COMPLIANCE: `yes`
+ - CM_MLPERF_SUBMISSION_RUN: `yes`
+ - CM_RUN_MLPERF_ACCURACY: `on`
+ - CM_RUN_SUBMISSION_CHECKER: `yes`
+ - CM_TAR_SUBMISSION_DIR: `yes`
+
+
+
+
+ * Group "**submission-generation-style**"
+
+ * `_full`
+ - ENV variables:
+ - CM_MLPERF_SUBMISSION_GENERATION_STYLE: `full`
+ - CM_MLPERF_SKIP_SUBMISSION_GENERATION: `yes`
+ * **`_short`** (default)
+ - ENV variables:
+ - CM_MLPERF_SUBMISSION_GENERATION_STYLE: `short`
+
+
+
+
+ ##### Default variations
+
+ `_performance-and-accuracy,_short`
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**division:** MLPerf division {open,closed} (*open*)
+ * --**category:** MLPerf category {edge,datacenter,network} (*edge*)
+ * --**device:** MLPerf device {cpu,cuda,rocm,qaic} (*cpu*)
+ * --**model:** MLPerf model {resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,3d-unet-99.9,rnnt,dlrm-v2-99,dlrm-v2-99.9,gptj-99,gptj-99.9,sdxl,llama2-70b-99,llama2-70b-99.9,mobilenet,efficientnet} (*resnet50*)
+ * --**precision:** MLPerf model precision {float32,float16,bfloat16,int8,uint8}
+ * --**implementation:** MLPerf implementation {mlcommons-python,mlcommons-cpp,nvidia,intel,qualcomm,ctuning-cpp-tflite} (*mlcommons-python*)
+ * --**backend:** MLPerf framework (backend) {onnxruntime,tf,pytorch,deepsparse,tensorrt,glow,tvm-onnx} (*onnxruntime*)
+ * --**scenario:** MLPerf scenario {Offline,Server,SingleStream,MultiStream} (*Offline*)
+ * --**mode:** MLPerf benchmark mode {,accuracy,performance}
+ * --**execution_mode:** MLPerf execution mode {test,fast,valid} (*test*)
+ * --**sut:** SUT configuration (if known)
+ * --**submitter:** Submitter name (without space) (*CTuning*)
+ * --**results_dir:** Folder path to store results (defaults to the current working directory)
+ * --**submission_dir:** Folder path to store MLPerf submission tree
+ * --**adr.compiler.tags:** Compiler for loadgen and any C/C++ part of implementation
+ * --**adr.inference-src-loadgen.env.CM_GIT_URL:** Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)
+ * --**adr.inference-src.env.CM_GIT_URL:** Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)
+ * --**adr.mlperf-inference-implementation.max_batchsize:** Maximum batchsize to be used
+ * --**adr.mlperf-inference-implementation.num_threads:** Number of threads (reference & C++ implementation only)
+ * --**adr.python.name:** Python virtual environment name (optional)
+ * --**adr.python.version:** Force Python version (must have all system deps)
+ * --**adr.python.version_min:** Minimal Python version (*3.8*)
+ * --**power:** Measure power {yes,no} (*no*)
+ * --**adr.mlperf-power-client.power_server:** MLPerf Power server IP address (*192.168.0.15*)
+ * --**adr.mlperf-power-client.port:** MLPerf Power server port (*4950*)
+ * --**clean:** Clean run (*False*)
+ * --**compliance:** Whether to run compliance tests (applicable only for closed division) {yes,no} (*no*)
+ * --**dashboard_wb_project:** W&B dashboard project (*cm-mlperf-dse-testing*)
+ * --**dashboard_wb_user:** W&B dashboard user (*cmind*)
+ * --**hw_name:** MLPerf hardware name (for example "gcp.c3_standard_8", "nvidia_orin", "lenovo_p14s_gen_4_windows_11", "macbook_pro_m1_2", "thundercomm_rb6" ...)
+ * --**multistream_target_latency:** Set MultiStream target latency
+ * --**offline_target_qps:** Set LoadGen Offline target QPS
+ * --**quiet:** Quiet run (select default values for all questions) (*True*)
+ * --**server_target_qps:** Set Server target QPS
+ * --**singlestream_target_latency:** Set SingleStream target latency
+ * --**target_latency:** Set Target latency
+ * --**target_qps:** Set LoadGen target QPS
+ * --**j:** Print results dictionary to console at the end of the run (*False*)
+ * --**repro:** Record input/output/state/info files to make it easier to reproduce results (*False*)
+ * --**time:** Print script execution time at the end of the run (*True*)
+ * --**debug:** Debug this script (*False*)
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--backend=value` → `CM_MLPERF_BACKEND=value`
+ * `--batch_size=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--beam_size=value` → `GPTJ_BEAM_SIZE=value`
+ * `--category=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value`
+ * `--clean=value` → `CM_MLPERF_CLEAN_ALL=value`
+ * `--compliance=value` → `CM_MLPERF_LOADGEN_COMPLIANCE=value`
+ * `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value`
+ * `--dashboard_wb_user=value` → `CM_MLPERF_DASHBOARD_WANDB_USER=value`
+ * `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value`
+ * `--device=value` → `CM_MLPERF_DEVICE=value`
+ * `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value`
+ * `--docker=value` → `CM_MLPERF_USE_DOCKER=value`
+ * `--dump_version_info=value` → `CM_DUMP_VERSION_INFO=value`
+ * `--execution_mode=value` → `CM_MLPERF_RUN_STYLE=value`
+ * `--find_performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value`
+ * `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--hw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value`
+ * `--imagenet_path=value` → `IMAGENET_PATH=value`
+ * `--implementation=value` → `CM_MLPERF_IMPLEMENTATION=value`
+ * `--lang=value` → `CM_MLPERF_IMPLEMENTATION=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--model=value` → `CM_MLPERF_MODEL=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--network=value` → `CM_NETWORK_LOADGEN=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--output_summary=value` → `MLPERF_INFERENCE_SUBMISSION_SUMMARY=value`
+ * `--output_tar=value` → `MLPERF_INFERENCE_SUBMISSION_TAR_FILE=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--power=value` → `CM_SYSTEM_POWER=value`
+ * `--precision=value` → `CM_MLPERF_MODEL_PRECISION=value`
+ * `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value`
+ * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value`
+ * `--readme=value` → `CM_MLPERF_README=value`
+ * `--regenerate_accuracy_file=value` → `CM_MLPERF_REGENERATE_ACCURACY_FILE=value`
+ * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--results_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--results_git_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value`
+ * `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value`
+ * `--run_style=value` → `CM_MLPERF_RUN_STYLE=value`
+ * `--save_console_log=value` → `CM_SAVE_CONSOLE_LOG=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--skip_submission_generation=value` → `CM_MLPERF_SKIP_SUBMISSION_GENERATION=value`
+ * `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value`
+ * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value`
+ * `--submitter=value` → `CM_MLPERF_SUBMITTER=value`
+ * `--sut=value` → `CM_MLPERF_INFERENCE_SUT_VARIATION=value`
+ * `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value`
+ * `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value`
+ * `--system_type=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value`
+ * `--threads=value` → `CM_NUM_THREADS=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_IMPLEMENTATION: `reference`
+ * CM_MLPERF_MODEL: `resnet50`
+ * CM_MLPERF_RUN_STYLE: `test`
+
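+Putting the pieces together, a hypothetical short submission-style run (all flags and variations come from the lists above; values are placeholders):
+
+```bash
+# Illustrative only: short test run that also generates a submission tree
+cm run script --tags=run-mlperf,inference,_r4.1,_submission \
+    --model=bert-99 --implementation=mlcommons-python --backend=onnxruntime \
+    --device=cpu --scenario=Offline --execution_mode=test --test_query_count=5 --quiet
+```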
+
+#### Versions
+* `master`
+* `r2.1`
+
+___
+#### Script output
+```bash
+cmr "run-mlperf,inference [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md
new file mode 100644
index 0000000000..676d855705
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md
@@ -0,0 +1,165 @@
+# app-mlperf-training-nvidia
+Automatically generated README for this automation recipe: **app-mlperf-training-nvidia**
+
+Category: **[Modular MLPerf training benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app vision language mlcommons mlperf training nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'app,vision,language,mlcommons,mlperf,training,nvidia',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app vision language mlcommons mlperf training nvidia[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ * `_bert`
+ - ENV variables:
+ - CM_MLPERF_MODEL: `bert`
+
+
+
+
+ * Group "**device**"
+
+ * **`_cuda`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cuda`
+ - USE_CUDA: `True`
+ * `_tpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `tpu`
+ - CUDA_VISIBLE_DEVICES: ``
+ - USE_CUDA: `False`
+
+
+
+
+ * Group "**framework**"
+
+ * `_pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_tf`
+ - Aliases: `_tensorflow`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tf`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+
+
+
+
+ ##### Default variations
+
+ `_cuda`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+ * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value`
+ * `--num_threads=value` → `CM_NUM_THREADS=value`
+ * `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--rerun=value` → `CM_RERUN=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia`
+
+
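+A hypothetical BERT training run combining the variations above (thread count and output path are placeholders):
+
+```bash
+# Illustrative only: BERT training with the PyTorch backend on CUDA
+cmr "app vision language mlcommons mlperf training nvidia _bert _cuda _pytorch" \
+    --num_threads=8 --output_dir=./training_out -j
+```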
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-bert-training.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/run-bert-training.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md
new file mode 100644
index 0000000000..4adad297fe
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md
@@ -0,0 +1,166 @@
+# app-mlperf-training-reference
+Automatically generated README for this automation recipe: **app-mlperf-training-reference**
+
+Category: **[Modular MLPerf training benchmark pipeline](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app vision language mlcommons mlperf training reference ref" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,vision,language,mlcommons,mlperf,training,reference,ref[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'app,vision,language,mlcommons,mlperf,training,reference,ref',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app vision language mlcommons mlperf training reference ref[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+ * `_bert`
+ - ENV variables:
+ - CM_MLPERF_MODEL: `bert`
+
+
+
+
+ * Group "**device**"
+
+ * **`_cuda`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cuda`
+ - USE_CUDA: `True`
+ * `_tpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `tpu`
+ - CUDA_VISIBLE_DEVICES: ``
+ - USE_CUDA: `False`
+
+
+
+
+ * Group "**framework**"
+
+ * `_pytorch`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `pytorch`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+ * `_tf`
+ - Aliases: `_tensorflow`
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tf`
+ - CM_MLPERF_BACKEND_VERSION: `<<>>`
+
+
+
+
+ ##### Default variations
+
+ `_cuda`
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value`
+ * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value`
+ * `--hw_name=value` → `CM_HW_NAME=value`
+ * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value`
+ * `--num_threads=value` → `CM_NUM_THREADS=value`
+ * `--output_dir=value` → `OUTPUT_BASE_DIR=value`
+ * `--rerun=value` → `CM_RERUN=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference`
+ * CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: ``
+
+
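+Analogously, a hypothetical BERT run against the reference implementation (values are placeholders):
+
+```bash
+# Illustrative only: BERT training with the PyTorch backend on CUDA
+cmr "app vision language mlcommons mlperf training reference ref _bert _cuda _pytorch" \
+    --num_threads=8 -j
+```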
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-bert-training.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/run-bert-training.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md
new file mode 100644
index 0000000000..73140884be
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md
@@ -0,0 +1,2 @@
+* [app-mlperf-training-nvidia](app-mlperf-training-nvidia/index.md)
+* [app-mlperf-training-reference](app-mlperf-training-reference/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md
new file mode 100644
index 0000000000..1d71d7f6f2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md
@@ -0,0 +1,87 @@
+# app-image-corner-detection
+Automatically generated README for this automation recipe: **app-image-corner-detection**
+
+Category: **[Modular application pipeline](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app image corner-detection" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,image,corner-detection
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app image corner-detection "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'app,image,corner-detection',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app image corner-detection"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "app image corner-detection " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md
new file mode 100644
index 0000000000..96076be6fe
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md
@@ -0,0 +1 @@
+* [app-image-corner-detection](app-image-corner-detection/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md
new file mode 100644
index 0000000000..a45f8a03ff
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md
@@ -0,0 +1,87 @@
+# detect-cpu
+Automatically generated README for this automation recipe: **detect-cpu**
+
+Category: **[Platform information](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "detect cpu detect-cpu info" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=detect,cpu,detect-cpu,info
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "detect cpu detect-cpu info "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'detect,cpu,detect-cpu,info',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "detect cpu detect-cpu info"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/run.bat)
+___
+#### Script output
+```bash
+cmr "detect cpu detect-cpu info " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md
new file mode 100644
index 0000000000..92e04fa5ee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md
@@ -0,0 +1,86 @@
+# detect-os
+Automatically generated README for this automation recipe: **detect-os**
+
+Category: **[Platform information](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "detect-os detect os info" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=detect-os,detect,os,info
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "detect-os detect os info "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'detect-os,detect,os,info',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "detect-os detect os info"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/run.bat)
+___
+#### Script output
+```bash
+cmr "detect-os detect os info " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md
new file mode 100644
index 0000000000..7a25f8c343
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md
@@ -0,0 +1,2 @@
+* [detect-cpu](detect-cpu/index.md)
+* [detect-os](detect-os/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md
new file mode 100644
index 0000000000..90f6bb959b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md
@@ -0,0 +1,88 @@
+# Activate virtual Python environment
+Automatically generated README for this automation recipe: **activate-python-venv**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+Developers: [Grigori Fursin](https://cKnowledge.org/gfursin)
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "activate python-venv" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=activate,python-venv
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "activate python-venv "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'activate,python-venv',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "activate python-venv"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/run.bat)
+___
+#### Script output
+```bash
+cmr "activate python-venv " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md
new file mode 100644
index 0000000000..0cf418eb22
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md
@@ -0,0 +1,421 @@
+# get-generic-python-lib
+Automatically generated README for this automation recipe: **get-generic-python-lib**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get generic-python-lib" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,generic-python-lib[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get generic-python-lib [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'get,generic-python-lib',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get generic-python-lib[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_Pillow`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `Pillow`
+ * `_anthropic`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `anthropic`
+ * `_apache-tvm`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `apache-tvm`
+ - CM_GENERIC_PYTHON_PIP_EXTRA: ` --pre`
+ * `_apex`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex`
+ * `_async_timeout`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `async_timeout`
+ * `_attr`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `attr`
+ * `_attrs`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `attrs`
+ * `_boto3`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `boto3`
+ * `_cloudpickle`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `cloudpickle`
+ * `_cmind`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `cmind`
+ * `_colored`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `colored`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://pypi.ngc.nvidia.com`
+ * `_conda.#`
+ * `_cupy`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `cupy`
+ * `_custom-python`
+ - ENV variables:
+ - CM_TMP_USE_CUSTOM_PYTHON: `on`
+ * `_datasets`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `datasets`
+ * `_decorator`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `decorator`
+ * `_deepsparse`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `deepsparse`
+ * `_dllogger`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `dllogger`
+ - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/NVIDIA/dllogger#egg=dllogger`
+ * `_fiftyone`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `fiftyone`
+ * `_google-api-python-client`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `google_api_python_client`
+ * `_google-auth-oauthlib`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `google_auth_oauthlib`
+ * `_huggingface_hub`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `huggingface_hub`
+ * `_inflect`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `inflect`
+ * `_jax`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `jax`
+ * `_jax_cuda`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `jax[cuda]`
+ - CM_GENERIC_PYTHON_PIP_EXTRA: `-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html`
+ - CM_JAX_VERSION_EXTRA: `CUDA`
+ * `_librosa`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `librosa`
+ * `_matplotlib`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `matplotlib`
+ * `_mlperf_loadgen`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `mlperf_loadgen`
+ - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/mlcommons/inference.git#subdirectory=loadgen`
+ * `_mlperf_logging`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `mlperf_logging`
+ - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/mlperf/logging.git`
+ * `_mpld3`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `mpld3`
+ * `_nibabel`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `nibabel`
+ * `_numpy`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `numpy`
+ * `_nvidia-apex`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex`
+ - CM_GENERIC_PYTHON_PACKAGE_VARIANT: `nvidia-apex`
+ - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880`
+ * `_nvidia-apex-from-src`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex`
+ - CM_GENERIC_PYTHON_PACKAGE_VARIANT: `nvidia-apex`
+ * `_nvidia-dali`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-dali-cuda120`
+ - CM_GENERIC_PYTHON_PIP_EXTRA: ` --upgrade --default-timeout=900`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://developer.download.nvidia.com/compute/redist`
+ * `_nvidia-pycocotools`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: `pycocotools`
+ - CM_GENERIC_PYTHON_PIP_URL: `pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI`
+ * `_nvidia-pyindex`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-pyindex`
+ * `_nvidia-tensorrt`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-tensorrt`
+ * `_onnx`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnx`
+ * `_onnx-graphsurgeon`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnx_graphsurgeon`
+ * `_onnxruntime`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnxruntime`
+ * `_onnxruntime_gpu`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnxruntime_gpu`
+ - CM_ONNXRUNTIME_VERSION_EXTRA: `GPU`
+ * `_openai`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `openai`
+ * `_opencv-python`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `opencv-python`
+ * `_package.#`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `#`
+ - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: ``
+ - CM_GENERIC_PYTHON_PIP_URL: ``
+ * `_pandas`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `pandas`
+ * `_path.#`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PIP_URL: `#`
+ * `_pillow`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `Pillow`
+ * `_pip`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `pip`
+ * `_polygraphy`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `polygraphy`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://pypi.ngc.nvidia.com`
+ * `_pre`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_DEV_VERSION: `yes`
+ * `_protobuf`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `protobuf`
+ * `_psutil`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `psutil`
+ * `_pycocotools`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `pycocotools`
+ * `_pycuda`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `pycuda`
+ * `_ray`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `ray[default]`
+ * `_requests`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `requests`
+ * `_rocm`
+ * `_safetensors`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `safetensors`
+ * `_scikit-learn`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `scikit-learn`
+ * `_scipy`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `scipy`
+ * `_scons`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `scons`
+ * `_setfit`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `setfit`
+ * `_setuptools`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `setuptools`
+ * `_six`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `six`
+ * `_sklearn`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `sklearn`
+ * `_sox`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `sox`
+ * `_sparsezoo`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `sparsezoo`
+ * `_streamlit`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `streamlit`
+ * `_streamlit_option_menu`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `streamlit_option_menu`
+ * `_tensorboard`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorboard`
+ * `_tensorflow`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorflow`
+ * `_tensorrt`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorrt`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/<<>>`
+ - CM_TORCH_VERSION_EXTRA: `CUDA`
+ * `_tflite`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tflite`
+ * `_tflite-runtime`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tflite-runtime`
+ * `_tokenization`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tokenization`
+ * `_toml`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `toml`
+ * `_torch`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu`
+ * `_torch_cuda`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: `https://download.pytorch.org/whl/<<>>`
+ - CM_TORCH_VERSION_EXTRA: `CUDA`
+ * `_torch_tensorrt`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch-tensorrt`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/<<>>`
+ - CM_TORCH_VERSION_EXTRA: `CUDA`
+ * `_torchaudio`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchaudio`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu`
+ * `_torchaudio_cuda`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchaudio`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: `https://download.pytorch.org/whl/<<>>`
+ - CM_TORCHAUDIO_VERSION_EXTRA: `CUDA`
+ * `_torchvision`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchvision`
+ - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu`
+ * `_torchvision_cuda`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchvision`
+ - CM_TORCHVISION_VERSION_EXTRA: `CUDA`
+ * `_tornado`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tornado`
+ * `_tqdm`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `tqdm`
+ * `_transformers`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `transformers`
+ * `_typing_extensions`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `typing_extensions`
+ * `_ujson`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `ujson`
+ * `_unidecode`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `unidecode`
+ * `_url.#`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PIP_URL: `#`
+ - CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: `yes`
+ * `_wandb`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `wandb`
+ * `_west`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `west`
+ * `_xgboost`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `xgboost`
+ * `_xlsxwriter`
+ - ENV variables:
+ - CM_GENERIC_PYTHON_PACKAGE_NAME: `xlsxwriter`
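+
+ For example, a minimal sketch of the `_package.#` variation, where `#` is replaced by a package name (`tabulate` below is purely illustrative):
+
+ ```bash
+ # install an arbitrary PyPI package through the generic wrapper;
+ # the text after "_package." becomes CM_GENERIC_PYTHON_PACKAGE_NAME
+ cm run script --tags=get,generic-python-lib,_package.tabulate
+ ```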
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--extra_index_url=value` → `CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL=value`
+ * `--force_install=value` → `CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL=value`
+ * `--index_url=value` → `CM_GENERIC_PYTHON_PIP_INDEX_URL=value`
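+
+ As a hedged sketch, these flags can be combined with a variation (the index URL below simply reuses the one documented for `_polygraphy` above):
+
+ ```bash
+ # forward an extra pip index via CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL
+ cm run script --tags=get,generic-python-lib,_package.polygraphy --extra_index_url=https://pypi.ngc.nvidia.com
+ ```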
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/run.bat)
+___
+#### Script output
+```bash
+cmr "get generic-python-lib [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md
new file mode 100644
index 0000000000..9544d87903
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md
@@ -0,0 +1,111 @@
+# get-python3
+Automatically generated README for this automation recipe: **get-python3**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get python python3 get-python get-python3" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,python,python3,get-python,get-python3[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get python python3 get-python get-python3 [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'get,python,python3,get-python,get-python3',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get python python3 get-python get-python3[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_conda.#`
+ - ENV variables:
+ - CM_PYTHON_CONDA: `yes`
+ - CM_PYTHON_INSTALL_CACHE_TAGS: `_conda.#`
+ * `_custom-path.#`
+ - ENV variables:
+ - CM_PYTHON_BIN_WITH_PATH: `#`
+ * `_lto`
+ * `_optimized`
+ * `_shared`
+ * `_with-custom-ssl`
+ * `_with-ssl`
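+
+ For instance, a sketch of the `_custom-path.#` variation, where `#` is replaced by an interpreter path (the path below is illustrative):
+
+ ```bash
+ # register a specific interpreter instead of searching the PATH;
+ # the text after "_custom-path." becomes CM_PYTHON_BIN_WITH_PATH
+ cm run script --tags=get,python,python3,_custom-path./usr/local/bin/python3.11
+ ```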
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/run.bat)
+___
+#### Script output
+```bash
+cmr "get python python3 get-python get-python3 [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md
new file mode 100644
index 0000000000..38a4cd7eca
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md
@@ -0,0 +1,6 @@
+* [activate-python-venv](activate-python-venv/index.md)
+* [get-generic-python-lib](get-generic-python-lib/index.md)
+* [get-python3](get-python3/index.md)
+* [install-generic-conda-package](install-generic-conda-package/index.md)
+* [install-python-src](install-python-src/index.md)
+* [install-python-venv](install-python-venv/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md
new file mode 100644
index 0000000000..1c663d5741
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md
@@ -0,0 +1,113 @@
+# install-generic-conda-package
+Automatically generated README for this automation recipe: **install-generic-conda-package**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-generic-conda-package/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get install generic generic-conda-lib conda-lib conda-package generic-conda-package[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_name.#`
+ * `_package.#`
+ - ENV variables:
+ - CM_CONDA_PKG_NAME: `#`
+
+
+
+
+ * Group "**package-source**"
+
+
+ * `_source.#`
+ - ENV variables:
+ - CM_CONDA_PKG_SRC: `#`
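+
+ For example, a sketch combining both variation groups (the package and channel names are illustrative):
+
+ ```bash
+ # install numpy from the conda-forge channel;
+ # "_package.#" sets CM_CONDA_PKG_NAME and "_source.#" sets CM_CONDA_PKG_SRC
+ cm run script --tags=get,install,generic-conda-package,_package.numpy,_source.conda-forge
+ ```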
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-generic-conda-package/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md
new file mode 100644
index 0000000000..f43cebd9fe
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md
@@ -0,0 +1,144 @@
+# install-python-src
+Automatically generated README for this automation recipe: **install-python-src**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src python python3 src-python3 src-python" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,src,python,python3,src-python3,src-python[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install src python python3 src-python3 src-python [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'install,src,python,python3,src-python3,src-python',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install src python python3 src-python3 src-python[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_lto`
+ - ENV variables:
+ - CM_PYTHON_LTO_FLAG: ` --lto`
+ - CM_PYTHON_INSTALL_CACHE_TAGS: `with-lto`
+ * `_optimized`
+ - ENV variables:
+ - CM_PYTHON_OPTIMIZATION_FLAG: ` --enable-optimizations`
+ - CM_PYTHON_INSTALL_CACHE_TAGS: `optimized`
+ * `_shared`
+ - ENV variables:
+ - CM_PYTHON_INSTALL_CACHE_TAGS: `shared`
+ - CM_SHARED_BUILD: `yes`
+ * `_with-custom-ssl`
+ - ENV variables:
+ - CM_CUSTOM_SSL: `yes`
+ - CM_PYTHON_INSTALL_CACHE_TAGS: `with-custom-ssl`
+
+
+
+
+ * Group "**ssl**"
+
+
+ * `_with-ssl`
+ - ENV variables:
+ - CM_ENABLE_SSL: `yes`
+ - CM_PYTHON_INSTALL_CACHE_TAGS: `with-ssl`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_ENABLE_SSL: `no`
+ * CM_CUSTOM_SSL: `no`
+ * CM_SHARED_BUILD: `no`
+ * CM_PYTHON_OPTIMIZATION_FLAG: ``
+ * CM_PYTHON_LTO_FLAG: ``
+ * CM_WGET_URL: `https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz`
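+
+ For example, a hedged sketch of overriding one of these defaults from the command line (equivalent to selecting the `_shared` variation):
+
+ ```bash
+ # build a shared-library Python instead of the default (CM_SHARED_BUILD defaults to "no")
+ cm run script --tags=install,src,python3 --env.CM_SHARED_BUILD=yes
+ ```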
+
+
+#### Versions
+Default version: `3.10.13`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src python python3 src-python3 src-python [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md
new file mode 100644
index 0000000000..f097aa1962
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md
@@ -0,0 +1,103 @@
+# install-python-venv
+Automatically generated README for this automation recipe: **install-python-venv**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install python get-python-venv python-venv" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=install,python,get-python-venv,python-venv[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "install python get-python-venv python-venv [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'install,python,get-python-venv,python-venv',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "install python get-python-venv python-venv[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_lto`
+ * `_optimized`
+ * `_shared`
+ * `_with-custom-ssl`
+ * `_with-ssl`
+
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/run.bat)
+___
+#### Script output
+```bash
+cmr "install python get-python-venv python-venv [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md
new file mode 100644
index 0000000000..754c07ddf2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md
@@ -0,0 +1 @@
+* [remote-run-commands](remote-run-commands/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md
new file mode 100644
index 0000000000..afb2ccf0c4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md
@@ -0,0 +1,117 @@
+# remote-run-commands
+Automatically generated README for this automation recipe: **remote-run-commands**
+
+Category: **[Remote automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "remote run cmds remote-run remote-run-cmds ssh-run ssh" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--client_refresh=value` → `CM_SSH_CLIENT_REFRESH=value`
+ * `--host=value` → `CM_SSH_HOST=value`
+ * `--password=value` → `CM_SSH_PASSWORD=value`
+ * `--port=value` → `CM_SSH_PORT=value`
+ * `--run_cmds=value` → `CM_SSH_RUN_COMMANDS=value`
+ * `--skip_host_verify=value` → `CM_SSH_SKIP_HOST_VERIFY=value`
+ * `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value`
+ * `--user=value` → `CM_SSH_USER=value`
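+
+ A hedged end-to-end sketch (the host, user, and command below are illustrative):
+
+ ```bash
+ # run a single command on a remote host over ssh using the flag-to-env mappings above
+ cm run script --tags=remote,run,cmds,ssh-run --host=192.168.0.42 --user=ubuntu --port=22 --run_cmds="uname -a"
+ ```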
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_SSH_PORT: `22`
+ * CM_SSH_HOST: `localhost`
+ * CM_SSH_USER: `$USER`
+ * CM_SSH_CLIENT_REFRESH: `10`
+ * CM_SSH_KEY_FILE: `$HOME/.ssh/id_rsa`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/run.bat)
+___
+#### Script output
+```bash
+cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md
new file mode 100644
index 0000000000..f443182972
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md
@@ -0,0 +1,652 @@
+# app-mlperf-inference-nvidia
+Automatically generated README for this automation recipe: **app-mlperf-inference-nvidia**
+
+Category: **[Reproduce MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+
+
+---
+
+This script is a CM wrapper around the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions.
+
+
+
+## Download the needed files
+
+* Please ask privately in [this Discord channel](https://discord.gg/y7hupJsUNb) if you would like access to an Amazon S3 bucket containing all the needed files for convenience. Otherwise, you can download them from the links below.
+
+For x86 machines, please download the latest install tar files from the sites below:
+1. [cuDNN](https://developer.nvidia.com/cudnn) (for CUDA 11)
+2. [TensorRT](https://developer.nvidia.com/tensorrt)
+3. Imagenet validation set (unfortunately not available via a public URL), which can be obtained by following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md)
+
+
+
+
+
+## Using Docker (Recommended on x86 systems)
+
+
+Assuming all the downloaded files are in your home directory, please follow these steps:
+
+1. Download CUDA 11.8
+ ```
+ wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+ ```
+2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+3. Give docker permission to the current user
+ ```
+ sudo usermod -aG docker $USER
+ ```
+ Log out and log back in, then restart docker if required and confirm that the Nvidia container toolkit is working:
+ ```
+ nvidia-ctk --version
+ ```
+4. Check that the Nvidia driver is working properly on the host:
+ ```
+ nvidia-smi
+ ```
+ If the above command produces any error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access:
+ ```
+ cmr "install cuda prebuilt _driver" --version=11.8.0
+ ```
+5. Build the docker container and mount the paths from the host machine.
+ **You may want to change the `scratch_path` location as it can take hundreds of GB.**
+ ```bash
+ cm docker script --tags=build,nvidia,inference,server \
+ --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
+ --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+ --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+ --imagenet_path=$HOME/imagenet-2012-val \
+ --scratch_path=$HOME/mlperf_scratch \
+ --docker_cm_repo=mlcommons@cm4mlops \
+ --results_dir=$HOME/results_dir \
+ --submission_dir=$HOME/submission_dir \
+ --adr.compiler.tags=gcc
+ ```
+ * Use `--docker_cache=no` to turn off docker caching
+ * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@cm4mlops --checkout=dev"` to update the CM repository when docker caching is used
+ * Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please provide a system name and answer yes to generate the configuration files.
+ ### Example output
+ ```
+ ============================================
+ => A system ID is a string containing only letters, numbers, and underscores
+ => that is used as the human-readable name of the system. It is also used as
+ => the system name when creating the measurements/ and results/ entries.
+ => This string should also start with a letter to be a valid Python enum member name.
+ => Specify the system ID to use for the current system: phoenix
+ => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+ => This script will generate Benchmark Configuration stubs for the detected system.
+ Continue? [y/n]: y
+ ```
+ Now you'll be inside the CM Nvidia docker container and can run further scripts.
+
+7. Once the build is complete, you can proceed with further CM scripts, such as those for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps.
+
+
+
+
+
+
+
+## Without Docker
+
+
+1. Install CUDA
+ If CUDA is not detected, CM should download and install it automatically when you run the workflow.
+ **Nvidia drivers are expected to be installed on the system.**
+
+2. Install cuDNN
+ ```bash
+ cmr "get cudnn" --tar_file=
+ ```
+3. Install TensorRT
+ ```bash
+ cmr "get tensorrt _dev" --tar_file=
+ ```
+ On non-x86 systems like Nvidia Orin, you can install these via the package manager, and CM should pick up the installation automatically during the workflow run.
+
+4. Build the Nvidia inference server
+ ```
+ cmr "build nvidia inference server" \
+ --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
+ --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+ --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+ --adr.compiler.tags=gcc \
+ [--custom_system=no]
+ ```
+ Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please provide a system name and answer yes to generate the configuration files.
+
+ ### Example output
+ ```
+ ============================================
+ => A system ID is a string containing only letters, numbers, and underscores
+ => that is used as the human-readable name of the system. It is also used as
+ => the system name when creating the measurements/ and results/ entries.
+ => This string should also start with a letter to be a valid Python enum member name.
+ => Specify the system ID to use for the current system: phoenix
+ => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+ => This script will generate Benchmark Configuration stubs for the detected system.
+ Continue? [y/n]: y
+ ```
+
+
+
+## Acknowledgments
+
+* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin,
+  sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).
+* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh.
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+ r = cmind.access({'action':'run',
+                   'automation':'script',
+                   'tags':'reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia',
+                   'out':'con',
+                   ...
+                   (other input keys for this script)
+                   ...
+                  })
+
+ if r['return']>0:
+     print(r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce mlcommons mlperf inference harness nvidia-harness nvidia[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_run-harness`
+ * `_v3.1`
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `v3.1`
+ - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: `GPTJ-07142023.pth`
+
+
+
+
+ * Group "**backend**"
+
+
+ * **`_tensorrt`** (default)
+ - ENV variables:
+ - CM_MLPERF_BACKEND: `tensorrt`
+ - CM_MLPERF_BACKEND_NAME: `TensorRT`
+
+
+
+
+ * Group "**batch-size**"
+
+
+ * `_batch_size.#`
+ - ENV variables:
+ - CM_MODEL_BATCH_SIZE: `#`
+ - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `#`
+
+
+
+
+ * Group "**build-engine-options**"
+
+
+ * `_build_engine_options.#`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: `#`
+
+
+
+
+ * Group "**device**"
+
+
+ * `_cpu`
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `cpu`
+ * **`_cuda`** (default)
+ - ENV variables:
+ - CM_MLPERF_DEVICE: `gpu`
+ - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
+
+
+
+
+ * Group "**device-memory**"
+
+
+ * `_gpu_memory.16`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `16`
+ * `_gpu_memory.24`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `24`
+ * `_gpu_memory.32`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `32`
+ * `_gpu_memory.40`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `40`
+ * `_gpu_memory.48`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `48`
+ * `_gpu_memory.8`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `8`
+ * `_gpu_memory.80`
+ - ENV variables:
+ - CM_NVIDIA_GPU_MEMORY: `80`
+
+
+
+
+ * Group "**dla-batch-size**"
+
+
+ * `_dla_batch_size.#`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: `#`
+ - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: `dla_batch_size.#`
+
+
+
+
+ * Group "**gpu-connection**"
+
+
+ * `_pcie`
+ * `_sxm`
+
+
+
+
+ * Group "**gpu-name**"
+
+
+ * `_a100`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ * `_a6000`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ * `_custom`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ - CM_MODEL_BATCH_SIZE: ``
+ - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `<<>>`
+ * `_l4`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ * `_orin`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ - CM_MODEL_BATCH_SIZE: ``
+ - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `<<>>`
+ * `_rtx_4090`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ * `_rtx_6000_ada`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+ * `_t4`
+ - ENV variables:
+ - CM_NVIDIA_CUSTOM_GPU: `yes`
+
+
+
+
+ * Group "**loadgen-scenario**"
+
+
+ * `_multistream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
+ * `_offline`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * `_server`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `Server`
+ * `_singlestream`
+ - ENV variables:
+ - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
+ - CUDA_VISIBLE_DEVICES_NOT_USED: `0`
+
+
+
+
+ * Group "**model**"
+
+
+ * `_3d-unet-99`
+ - ENV variables:
+ - CM_MODEL: `3d-unet-99`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ * `_3d-unet-99.9`
+ - ENV variables:
+ - CM_MODEL: `3d-unet-99.9`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ * `_bert-99`
+ - ENV variables:
+ - CM_MODEL: `bert-99`
+ - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ * `_bert-99.9`
+ - ENV variables:
+ - CM_MODEL: `bert-99.9`
+ - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
+ * `_dlrm-v2-99`
+ - ENV variables:
+ - CM_MODEL: `dlrm-v2-99`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
+ * `_dlrm-v2-99.9`
+ - ENV variables:
+ - CM_MODEL: `dlrm-v2-99.9`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
+ * `_gptj-99`
+ - ENV variables:
+ - CM_MODEL: `gptj-99`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
+ * `_gptj-99.9`
+ - ENV variables:
+ - CM_MODEL: `gptj-99.9`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
+ * **`_resnet50`** (default)
+ - ENV variables:
+ - CM_MODEL: `resnet50`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ * `_retinanet`
+ - ENV variables:
+ - CM_MODEL: `retinanet`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
+ * `_rnnt`
+ - ENV variables:
+ - CM_MODEL: `rnnt`
+ - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt`
+ - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
+ - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp16`
+ - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
+
+
+
+
+ * Group "**num-gpus**"
+
+
+ * `_num-gpus.#`
+ - ENV variables:
+ - CM_NVIDIA_NUM_GPUS: `#`
+ * **`_num-gpus.1`** (default)
+ - ENV variables:
+ - CM_NVIDIA_NUM_GPUS: `1`
+
+
+
+
+ * Group "**power-mode**"
+
+
+ * `_maxn`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True`
+ * `_maxq`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True`
+
+
+
+
+ * Group "**run-mode**"
+
+
+ * `_build`
+ - ENV variables:
+ - MLPERF_NVIDIA_RUN_COMMAND: `build`
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `build`
+ * `_build_engine`
+ - Aliases: `_build-engine`
+ - ENV variables:
+ - MLPERF_NVIDIA_RUN_COMMAND: `generate_engines`
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `generate_engines`
+ * `_calibrate`
+ - ENV variables:
+ - MLPERF_NVIDIA_RUN_COMMAND: `calibrate`
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `calibrate`
+ * `_download_model`
+ - ENV variables:
+ - MLPERF_NVIDIA_RUN_COMMAND: `download_model`
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `download_model`
+ * `_prebuild`
+ - ENV variables:
+ - MLPERF_NVIDIA_RUN_COMMAND: `prebuild`
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `prebuild`
+ * `_preprocess_data`
+ - ENV variables:
+ - MLPERF_NVIDIA_RUN_COMMAND: `preprocess_data`
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `preprocess_data`
+ * **`_run_harness`** (default)
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `run_harness`
+ - MLPERF_NVIDIA_RUN_COMMAND: `run_harness`
+ - CM_CALL_MLPERF_RUNNER: `yes`
+
+
+
+
+ * Group "**triton**"
+
+
+ * `_use_triton`
+ - ENV variables:
+ - CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: `yes`
+ - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: `using_triton`
+
+
+
+
+ * Group "**version**"
+
+
+ * **`_v4.0`** (default)
+ - ENV variables:
+ - CM_MLPERF_INFERENCE_VERSION: `v4.0`
+ - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: `GPTJ-FP8-quantized`
+
+
+
+
+ ##### Default variations
+
+ `_cuda,_num-gpus.1,_resnet50,_run_harness,_tensorrt,_v4.0`
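+
+ In other words, a plain invocation without explicit variations behaves like the following sketch:
+
+ ```bash
+ # spelling out the default variations listed above
+ cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia,_cuda,_num-gpus.1,_resnet50,_run_harness,_tensorrt,_v4.0
+ ```
+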
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--audio_buffer_num_lines=value` → `CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES=value`
+ * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+ * `--deque_timeout_usec=value` → `CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC=value`
+ * `--devices=value` → `CM_MLPERF_NVIDIA_HARNESS_DEVICES=value`
+ * `--dla_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE=value`
+ * `--dla_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS=value`
+ * `--dla_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS=value`
+ * `--embedding_weights_on_gpu_part=value` → `CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART=value`
+ * `--enable_sort=value` → `CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT=value`
+ * `--end_on_device=value` → `CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE=value`
+ * `--extra_run_options=value` → `CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS=value`
+ * `--gpu_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE=value`
+ * `--gpu_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS=value`
+ * `--gpu_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS=value`
+ * `--graphs_max_seqlen=value` → `CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN=value`
+ * `--input_format=value` → `CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT=value`
+ * `--log_dir=value` → `CM_MLPERF_NVIDIA_HARNESS_LOG_DIR=value`
+ * `--make_cmd=value` → `MLPERF_NVIDIA_RUN_COMMAND=value`
+ * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+ * `--max_dlas=value` → `CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS=value`
+ * `--mlperf_conf=value` → `CM_MLPERF_CONF=value`
+ * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value`
+ * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+ * `--num_issue_query_threads=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS=value`
+ * `--num_sort_segments=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS=value`
+ * `--num_warmups=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS=value`
+ * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+ * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value`
+ * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+ * `--power_setting=value` → `CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING=value`
+ * `--rerun=value` → `CM_RERUN=value`
+ * `--run_infer_on_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS=value`
+ * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value`
+ * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+ * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+ * `--skip_postprocess=value` → `CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS=value`
+ * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value`
+ * `--soft_drop=value` → `CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP=value`
+ * `--start_from_device=value` → `CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE=value`
+ * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+ * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+ * `--use_cuda_thread_per_device=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE=value`
+ * `--use_deque_limit=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT=value`
+ * `--use_fp8=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_FP8=value`
+ * `--use_graphs=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS=value`
+ * `--use_small_tile_gemm_plugin=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN=value`
+ * `--use_triton=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_TRITON=value`
+ * `--user_conf=value` → `CM_MLPERF_USER_CONF=value`
+ * `--workspace_size=value` → `CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE=value`
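+
+ For example, a hedged sketch of a performance run with a custom GPU batch size (all values are illustrative):
+
+ ```bash
+ # Offline performance run of the default resnet50 harness;
+ # --gpu_batch_size maps to CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE
+ cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia --scenario=Offline --mode=performance --gpu_batch_size=64
+ ```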
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+ These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
+ * CM_BATCH_COUNT: `1`
+ * CM_BATCH_SIZE: `1`
+ * CM_FAST_COMPILATION: `yes`
+ * CM_MLPERF_LOADGEN_SCENARIO: `Offline`
+ * CM_MLPERF_LOADGEN_MODE: `performance`
+ * CM_SKIP_PREPROCESS_DATASET: `no`
+ * CM_SKIP_MODEL_DOWNLOAD: `no`
+ * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia_original`
+ * CM_MLPERF_SKIP_RUN: `no`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md
new file mode 100644
index 0000000000..6db8a9a3ed
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md
@@ -0,0 +1,4 @@
+* [app-mlperf-inference-nvidia](app-mlperf-inference-nvidia/index.md)
+* [reproduce-mlperf-octoml-tinyml-results](reproduce-mlperf-octoml-tinyml-results/index.md)
+* [reproduce-mlperf-training-nvidia](reproduce-mlperf-training-nvidia/index.md)
+* [wrapper-reproduce-octoml-tinyml-submission](wrapper-reproduce-octoml-tinyml-submission/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md
new file mode 100644
index 0000000000..d32b17538b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md
@@ -0,0 +1,137 @@
+# reproduce-mlperf-octoml-tinyml-results
+Automatically generated README for this automation recipe: **reproduce-mlperf-octoml-tinyml-results**
+
+Category: **[Reproduce MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce tiny results mlperf octoml mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,tiny,results,mlperf,octoml,mlcommons[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,tiny,results,mlperf,octoml,mlcommons',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce tiny results mlperf octoml mlcommons[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_NRF`
+ - ENV variables:
+ - CM_TINY_BOARD: `NRF5340DK`
+ * `_NUCLEO`
+ - ENV variables:
+ - CM_TINY_BOARD: `NUCLEO_L4R5ZI`
+ * `_ad`
+ - ENV variables:
+ - CM_TINY_MODEL: `ad`
+ * `_cmsis_nn`
+ - ENV variables:
+ - CM_MICROTVM_VARIANT: `microtvm_cmsis_nn`
+ * `_ic`
+ - ENV variables:
+ - CM_TINY_MODEL: `ic`
+ * `_kws`
+ - ENV variables:
+ - CM_TINY_MODEL: `kws`
+ * `_native`
+ - ENV variables:
+ - CM_MICROTVM_VARIANT: `microtvm_native`
+ * `_vww`
+ - ENV variables:
+ - CM_TINY_MODEL: `vww`
+
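+    Since these variations are ungrouped, any combination can be selected in one run, e.g. a board plus a model plus a microTVM variant (an illustrative sketch, not a validated configuration):
+
+    ```bash
+    cmr "reproduce tiny results mlperf octoml mlcommons _NUCLEO _kws _cmsis_nn" --recreate_binary=yes
+    ```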
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--flash=value` → `CM_FLASH_BOARD=value`
+ * `--recreate_binary=value` → `CM_RECREATE_BINARY=value`
+
+
+
+#### Versions
+Default version: `r1.0`
+
+* `r1.0`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md
new file mode 100644
index 0000000000..8b461ba108
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md
@@ -0,0 +1,115 @@
+# reproduce-mlperf-training-nvidia
+Automatically generated README for this automation recipe: **reproduce-mlperf-training-nvidia**
+
+Category: **[Reproduce MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce mlcommons mlperf train training nvidia-training nvidia" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce mlcommons mlperf train training nvidia-training nvidia[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**benchmark**"
+
+
+ * `_resnet`
+ - ENV variables:
+ - CM_MLPERF_TRAINING_BENCHMARK: `resnet`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--results_dir=value` → `CM_MLPERF_RESULTS_DIR=value`
+ * `--system_conf_name=value` → `CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME=value`
+
+
+
+#### Versions
+* `r2.1`
+* `r3.0`
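+
+A specific version can be selected at run time (a minimal sketch, assuming the standard CM `--version` flag; the results directory is illustrative):
+
+```bash
+cm run script --tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia,_resnet --version=r3.0 --results_dir=$HOME/mlperf_training_results
+```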
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-resnet.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/run-resnet.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md
new file mode 100644
index 0000000000..2a69d5c1c4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md
@@ -0,0 +1,101 @@
+# wrapper-reproduce-octoml-tinyml-submission
+Automatically generated README for this automation recipe: **wrapper-reproduce-octoml-tinyml-submission**
+
+Category: **[Reproduce MLPerf benchmarks](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--flash=value` → `CM_FLASH_BOARD=value`
+ * `--recreate_binary=value` → `CM_RECREATE_BINARY=value`
+
+
+
+#### Versions
+Default version: `r1.0`
+
+* `r1.0`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md
new file mode 100644
index 0000000000..ba254ab2b1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md
@@ -0,0 +1,98 @@
+# get-ipol-src
+Automatically generated README for this automation recipe: **get-ipol-src**
+
+Category: **[Reproducibility and artifact evaluation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get ipol journal src ipol-src" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,ipol,journal,src,ipol-src [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get ipol journal src ipol-src " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,ipol,journal,src,ipol-src',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get ipol journal src ipol-src" [--input_flags]
+ ```
+___
+
+=== "Input Flags"
+
+
+ #### Input Flags
+
+ * --**number:** IPOL publication number
+ * --**year:** IPOL publication year
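+
+    For example, to fetch the source for one IPOL publication (the number and year below are illustrative):
+
+    ```bash
+    cmr "get ipol journal src ipol-src" --number=439 --year=2022
+    ```
+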
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--number=value` → `CM_IPOL_NUMBER=value`
+ * `--year=value` → `CM_IPOL_YEAR=value`
+
+
+
+
+___
+#### Script output
+```bash
+cmr "get ipol journal src ipol-src " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md
new file mode 100644
index 0000000000..6803c39f98
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md
@@ -0,0 +1,4 @@
+* [get-ipol-src](get-ipol-src/index.md)
+* [process-ae-users](process-ae-users/index.md)
+* [reproduce-ipol-paper-2022-439](reproduce-ipol-paper-2022-439/index.md)
+* [reproduce-micro-paper-2023-victima](reproduce-micro-paper-2023-victima/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md
new file mode 100644
index 0000000000..51e9a4f917
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md
@@ -0,0 +1,95 @@
+# process-ae-users
+Automatically generated README for this automation recipe: **process-ae-users**
+
+Category: **[Reproducibility and artifact evaluation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "process ae users" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=process,ae,users [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "process ae users " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'process,ae,users',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "process ae users" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--file=value` → `CM_PROCESS_AE_USERS_INPUT_FILE=value`
+
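+    A minimal invocation (a sketch; the input file name is hypothetical):
+
+    ```bash
+    cmr "process ae users" --file=ae-users.csv
+    ```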
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/run.bat)
+___
+#### Script output
+```bash
+cmr "process ae users " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md
new file mode 100644
index 0000000000..d0298a09c1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md
@@ -0,0 +1,97 @@
+# reproduce-ipol-paper-2022-439
+Automatically generated README for this automation recipe: **reproduce-ipol-paper-2022-439**
+
+Category: **[Reproducibility and artifact evaluation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439 [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--image1=value` → `CM_IMAGE_1=value`
+ * `--image2=value` → `CM_IMAGE_2=value`
+
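+    For example, running the workflow on two input images (a sketch; the file names are hypothetical):
+
+    ```bash
+    cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" --image1=input1.png --image2=input2.png
+    ```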
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/run.bat)
+___
+#### Script output
+```bash
+cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md
new file mode 100644
index 0000000000..461210341b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md
@@ -0,0 +1,123 @@
+# reproduce-micro-paper-2023-victima
+Automatically generated README for this automation recipe: **reproduce-micro-paper-2023-victima**
+
+Category: **[Reproducibility and artifact evaluation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "reproduce project paper micro micro-2023 victima" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=reproduce,project,paper,micro,micro-2023,victima[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "reproduce project paper micro micro-2023 victima [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,project,paper,micro,micro-2023,victima',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "reproduce project paper micro micro-2023 victima[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_install_deps`
+ * `_plot`
+ * `_run`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--container=value` → `CM_VICTIMA_CONTAINER=value`
+ * `--job_manager=value` → `CM_VICTIMA_JOB_MANAGER=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ * CM_VICTIMA_JOB_MANAGER: `native`
+ * CM_VICTIMA_CONTAINER: `docker`
+
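+    For example, passing both settings explicitly (the values shown are the documented defaults):
+
+    ```bash
+    cmr "reproduce project paper micro micro-2023 victima _run" --job_manager=native --container=docker
+    ```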
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "reproduce project paper micro micro-2023 victima [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md
new file mode 100644
index 0000000000..c7d48602b2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md
@@ -0,0 +1,15 @@
+* [print-any-text](print-any-text/index.md)
+* [print-croissant-desc](print-croissant-desc/index.md)
+* [print-hello-world](print-hello-world/index.md)
+* [print-hello-world-java](print-hello-world-java/index.md)
+* [print-hello-world-javac](print-hello-world-javac/index.md)
+* [print-hello-world-py](print-hello-world-py/index.md)
+* [print-python-version](print-python-version/index.md)
+* [run-python](run-python/index.md)
+* [test-cm-core](test-cm-core/index.md)
+* [test-cm-script-pipeline](test-cm-script-pipeline/index.md)
+* [test-deps-conditions](test-deps-conditions/index.md)
+* [test-deps-conditions2](test-deps-conditions2/index.md)
+* [test-download-and-extract-artifacts](test-download-and-extract-artifacts/index.md)
+* [test-set-sys-user-cm](test-set-sys-user-cm/index.md)
+* [upgrade-python-pip](upgrade-python-pip/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md
new file mode 100644
index 0000000000..3a924de551
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md
@@ -0,0 +1,129 @@
+# print-any-text
+Automatically generated README for this automation recipe: **print-any-text**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+Developers: Grigori Fursin
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print any-text" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,any-text[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print any-text [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,any-text',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print any-text[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_cm_env.#`
+ - ENV variables:
+ - CM_PRINT_ANY_CM_ENV_KEYS: `#`
+ * `_os_env.#`
+ - ENV variables:
+ - CM_PRINT_ANY_OS_ENV_KEYS: `#`
+ * `_text.#`
+ - ENV variables:
+ - CM_PRINT_ANY_TEXT: `#`
+
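+    The `#` in these variations is a placeholder for a user-supplied suffix, so `_text.hello` should set `CM_PRINT_ANY_TEXT=hello` (an illustrative run):
+
+    ```bash
+    cm run script --tags=print,any-text,_text.hello --os_env_keys=PATH
+    ```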
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--cm_env_keys=value` → `CM_PRINT_ANY_CM_ENV_KEYS=value`
+ * `--os_env_keys=value` → `CM_PRINT_ANY_OS_ENV_KEYS=value`
+ * `--text=value` → `CM_PRINT_ANY_TEXT=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ * CM_PRINT_ANY_TEXT: ``
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/run.bat)
+___
+#### Script output
+```bash
+cmr "print any-text [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md
new file mode 100644
index 0000000000..2533d905d7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md
@@ -0,0 +1,106 @@
+# print-croissant-desc
+Automatically generated README for this automation recipe: **print-croissant-desc**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print croissant desc" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,croissant,desc [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print croissant desc " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,croissant,desc',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print croissant desc" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--url=value` → `CM_PRINT_CROISSANT_URL=value`
+
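+    For example, passing the documented default URL explicitly (substitute any other Croissant metadata file):
+
+    ```bash
+    cmr "print croissant desc" --url=https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json
+    ```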
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ * CM_PRINT_CROISSANT_URL: `https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json`
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/run.bat)
+___
+#### Script output
+```bash
+cmr "print croissant desc " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md
new file mode 100644
index 0000000000..56a73326b0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md
@@ -0,0 +1,86 @@
+# print-hello-world-java
+Automatically generated README for this automation recipe: **print-hello-world-java**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print hello world hello-world hello world java" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,hello world,hello-world,hello,world,java
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print hello world hello-world hello world java "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello world,hello-world,hello,world,java',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print hello world hello-world hello world java"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/run.bat)
+___
+#### Script output
+```bash
+cmr "print hello world hello-world hello world java " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md
new file mode 100644
index 0000000000..0166b29cdf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md
@@ -0,0 +1,86 @@
+# print-hello-world-javac
+Automatically generated README for this automation recipe: **print-hello-world-javac**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print hello world hello-world hello world javac" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,hello world,hello-world,hello,world,javac
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print hello world hello-world hello world javac "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello world,hello-world,hello,world,javac',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print hello world hello-world hello world javac"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/run.bat)
+___
+#### Script output
+```bash
+cmr "print hello world hello-world hello world javac " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md
new file mode 100644
index 0000000000..e753b2fd81
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md
@@ -0,0 +1,86 @@
+# print-hello-world-py
+Automatically generated README for this automation recipe: **print-hello-world-py**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print hello world hello-world hello world python" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,hello world,hello-world,hello,world,python
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print hello world hello-world hello world python "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello world,hello-world,hello,world,python',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print hello world hello-world hello world python"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/run.bat)
+___
+#### Script output
+```bash
+cmr "print hello world hello-world hello world python " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md
new file mode 100644
index 0000000000..d0bba05baa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md
@@ -0,0 +1,123 @@
+# print-hello-world
+Automatically generated README for this automation recipe: **print-hello-world**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print hello-world hello world hello world native-script native script" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,hello-world,hello world,hello,world,native-script,native,script[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print hello-world hello world hello world native-script native script [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello-world,hello world,hello,world,native-script,native,script',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print hello-world hello world hello world native-script native script[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_skip_print_env`
+ - ENV variables:
+ - CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV: `yes`
+ * `_text.#`
+ - ENV variables:
+ - CM_PRINT_HELLO_WORLD_TEXT: `#`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--test1=value` → `CM_ENV_TEST1=value`
+
+
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, via the `env` dictionary in `@input.json`, or using script flags.
+
+ * CM_ENV_TEST1: `TEST1`
+
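+    For example, `--test1` overrides the `CM_ENV_TEST1` default above (the value is illustrative):
+
+    ```bash
+    cmr "print hello-world hello world hello world native-script native script _text.Hi" --test1=TEST2
+    ```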
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/run.bat)
+___
+#### Script output
+```bash
+cmr "print hello-world hello world hello world native-script native script [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md
new file mode 100644
index 0000000000..6fd14d421c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md
@@ -0,0 +1,86 @@
+# print-python-version
+Automatically generated README for this automation recipe: **print-python-version**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "print python version python-version" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=print,python,version,python-version
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "print python version python-version "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,python,version,python-version',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "print python version python-version"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/run.bat)
+___
+#### Script output
+```bash
+cmr "print python version python-version " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md
new file mode 100644
index 0000000000..be4b0fa0f1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md
@@ -0,0 +1,95 @@
+# run-python
+Automatically generated README for this automation recipe: **run-python**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run python" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=run,python [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "run python " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,python',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "run python" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--command=value` → `CM_RUN_PYTHON_CMD=value`
+
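+    For example, running an inline Python command through the CM-detected interpreter (the command is illustrative):
+
+    ```bash
+    cmr "run python" --command="print('hello from CM')"
+    ```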
+
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/run.bat)
+___
+#### Script output
+```bash
+cmr "run python " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md
new file mode 100644
index 0000000000..d7ad5f859f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md
@@ -0,0 +1,87 @@
+# test-cm-core
+Automatically generated README for this automation recipe: **test-cm-core**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test cm core" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,cm,core
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test cm core "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,cm,core',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test cm core"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/run.bat)
+___
+#### Script output
+```bash
+cmr "test cm core " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md
new file mode 100644
index 0000000000..4ab088034c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md
@@ -0,0 +1,90 @@
+# test-cm-script-pipeline
+Automatically generated README for this automation recipe: **test-cm-script-pipeline**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+Developers: Grigori Fursin
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test cm-script pipeline" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,cm-script,pipeline
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test cm-script pipeline "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,cm-script,pipeline',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test cm-script pipeline"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run.sh)
+ * [run2.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run2.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run.bat)
+ * [run2.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run2.bat)
+___
+#### Script output
+```bash
+cmr "test cm-script pipeline " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md
new file mode 100644
index 0000000000..976ddd6b6b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md
@@ -0,0 +1,93 @@
+# test-deps-conditions
+Automatically generated README for this automation recipe: **test-deps-conditions**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+Developers: Grigori Fursin
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test deps conditions" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,deps,conditions [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test deps conditions " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,deps,conditions',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test deps conditions" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--test1=value` → `CM_ENV1=value`
+ * `--test2=value` → `CM_ENV2=value`
+ * `--test3=value` → `CM_ENV3=value`
+
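+    For example, setting all three test variables in one run (the values are arbitrary; the script presumably checks which dependencies fire when these are set):
+
+    ```bash
+    cm run script --tags=test,deps,conditions --test1=1 --test2=2 --test3=3
+    ```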
+
+
+
+___
+#### Script output
+```bash
+cmr "test deps conditions " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md
new file mode 100644
index 0000000000..94ed26f620
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md
@@ -0,0 +1,91 @@
+# test-deps-conditions2
+Automatically generated README for this automation recipe: **test-deps-conditions2**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+Developers: Grigori Fursin
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions2/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions2/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test deps conditions2" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,deps,conditions2 [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test deps conditions2 " [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,deps,conditions2',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test deps conditions2" [--input_flags]
+ ```
+___
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--test=value` → `TEST=value`
+
+
+
+
+___
+#### Script output
+```bash
+cmr "test deps conditions2 " [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md
new file mode 100644
index 0000000000..8e2e0d0d19
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md
@@ -0,0 +1,87 @@
+# test-download-and-extract-artifacts
+Automatically generated README for this automation recipe: **test-download-and-extract-artifacts**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/README-extra.md)
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/_cm.yaml)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "test download-and-extract-artifacts" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=test,download-and-extract-artifacts
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "test download-and-extract-artifacts "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'test,download-and-extract-artifacts',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "test download-and-extract-artifacts"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/run.bat)
+___
+#### Script output
+```bash
+cmr "test download-and-extract-artifacts " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md
new file mode 100644
index 0000000000..f4448d63be
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md
@@ -0,0 +1,96 @@
+# test-set-sys-user-cm
+Automatically generated README for this automation recipe: **test-set-sys-user-cm**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/test-set-sys-user-cm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "demo set sys-user cm sys-user-cm" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=demo,set,sys-user,cm,sys-user-cm
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "demo set sys-user cm sys-user-cm "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'demo,set,sys-user,cm,sys-user-cm',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "demo set sys-user cm sys-user-cm"
+ ```
+___
+
+=== "Default environment"
+
+ #### Default environment
+
+
+    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+ * CM_SUDO: `sudo`
+
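+    For example, a minimal sketch (the empty value is illustrative) that overrides `CM_SUDO` to run without sudo:
+
+    ```bash
+    cm run script --tags=demo,set,sys-user,cm,sys-user-cm --env.CM_SUDO=""
+    ```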
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-set-sys-user-cm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "demo set sys-user cm sys-user-cm " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md
new file mode 100644
index 0000000000..3e593c7272
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md
@@ -0,0 +1,86 @@
+# upgrade-python-pip
+Automatically generated README for this automation recipe: **upgrade-python-pip**
+
+Category: **[Tests](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "upgrade python pip python-pip" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=upgrade,python,pip,python-pip
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "upgrade python pip python-pip "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'upgrade,python,pip,python-pip',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "upgrade python pip python-pip"
+ ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/run.sh)
+=== "Windows"
+
+ * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/run.bat)
+___
+#### Script output
+```bash
+cmr "upgrade python pip python-pip " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md
new file mode 100644
index 0000000000..ae17fabc46
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md
@@ -0,0 +1,114 @@
+# create-fpgaconvnet-app-tinyml
+Automatically generated README for this automation recipe: **create-fpgaconvnet-app-tinyml**
+
+Category: **[TinyML automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-app-tinyml/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "create app fpgaconvnet" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=create,app,fpgaconvnet[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "create app fpgaconvnet [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'create,app,fpgaconvnet',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "create app fpgaconvnet[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**benchmark**"
+
+
+ * **`_ic`** (default)
+
+
+
+
+ * Group "**board**"
+
+
+ * **`_zc706`** (default)
+ - ENV variables:
+ - CM_TINY_BOARD: `zc706`
+
+
+
+
+ ##### Default variations
+
+ `_ic,_zc706`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-app-tinyml/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "create app fpgaconvnet [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md
new file mode 100644
index 0000000000..e1e0bab297
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md
@@ -0,0 +1,114 @@
+# create-fpgaconvnet-config-tinyml
+Automatically generated README for this automation recipe: **create-fpgaconvnet-config-tinyml**
+
+Category: **[TinyML automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-config-tinyml/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "create config fpgaconvnet" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=create,config,fpgaconvnet[,variations]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "create config fpgaconvnet [variations]"
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'create,config,fpgaconvnet',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "create config fpgaconvnet[variations]"
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * Group "**benchmark**"
+
+
+ * **`_ic`** (default)
+
+
+
+
+ * Group "**board**"
+
+
+ * **`_zc706`** (default)
+ - ENV variables:
+ - CM_TINY_BOARD: `zc706`
+
+
+
+
+ ##### Default variations
+
+ `_ic,_zc706`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-config-tinyml/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "create config fpgaconvnet [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md
new file mode 100644
index 0000000000..df6a2e2980
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md
@@ -0,0 +1,119 @@
+# flash-tinyml-binary
+Automatically generated README for this automation recipe: **flash-tinyml-binary**
+
+Category: **[TinyML automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "flash tiny mlperf mlcommons" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=flash,tiny,mlperf,mlcommons[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "flash tiny mlperf mlcommons [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'flash,tiny,mlperf,mlcommons',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "flash tiny mlperf mlcommons[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_NRF`
+ * `_NUCLEO`
+ * `_ad`
+ * `_cmsis_nn`
+ * `_ic`
+ * `_kws`
+ * `_native`
+ * `_vww`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--build_dir=value` → `CM_TINY_BUILD_DIR=value`
+
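+    For example, a hedged sketch combining the documented `_NRF` variation with this flag (the build directory path is hypothetical):
+
+    ```bash
+    cmr "flash tiny mlperf mlcommons _NRF" --build_dir=/path/to/tiny/build
+    ```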
+
+
+#### Versions
+Default version: `r1.0`
+
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "flash tiny mlperf mlcommons [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md
new file mode 100644
index 0000000000..b73c5eb0a5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md
@@ -0,0 +1,119 @@
+# get-microtvm
+Automatically generated README for this automation recipe: **get-microtvm**
+
+Category: **[TinyML automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get src source microtvm tiny" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,src,source,microtvm,tiny[,variations] [--input_flags]
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get src source microtvm tiny [variations]" [--input_flags]
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,src,source,microtvm,tiny',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get src source microtvm tiny[variations]" [--input_flags]
+ ```
+___
+
+=== "Variations"
+
+
+ #### Variations
+
+ * *No group (any combination of variations can be selected)*
+
+
+ * `_full-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 10`
+ * `_short-history`
+ - ENV variables:
+ - CM_GIT_DEPTH: `--depth 10`
+
+
+
+=== "Input Flag Mapping"
+
+
+ #### Script flags mapped to environment
+
+ * `--ssh=value` → `CM_GIT_SSH=value`
+
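+    For example, a hedged sketch that clones over SSH (the `yes` value is an assumption):
+
+    ```bash
+    cmr "get src source microtvm tiny _short-history" --ssh=yes
+    ```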
+
+
+#### Versions
+Default version: `main`
+
+* `custom`
+* `main`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get src source microtvm tiny [variations]" [--input_flags] -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md
new file mode 100644
index 0000000000..e32311f971
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md
@@ -0,0 +1,93 @@
+# get-zephyr-sdk
+Automatically generated README for this automation recipe: **get-zephyr-sdk**
+
+Category: **[TinyML automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get zephyr-sdk" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,zephyr-sdk
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get zephyr-sdk "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,zephyr-sdk',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get zephyr-sdk"
+ ```
+___
+
+#### Versions
+Default version: `0.13.2`
+
+* `0.13.1`
+* `0.13.2`
+* `0.15.0`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get zephyr-sdk " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md
new file mode 100644
index 0000000000..6016b7ecf8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md
@@ -0,0 +1,92 @@
+# get-zephyr
+Automatically generated README for this automation recipe: **get-zephyr**
+
+Category: **[TinyML automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get zephyr" --help````
+
+#### Run this script
+
+=== "CLI"
+ ##### Run this script via CLI
+
+ ```bash
+ cm run script --tags=get,zephyr
+ ```
+=== "CLI Alt"
+ ##### Run this script via CLI (alternative)
+
+
+ ```bash
+ cmr "get zephyr "
+ ```
+
+=== "Python"
+ ##### Run this script from Python
+
+
+ ```python
+
+ import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,zephyr',
+                  'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+ if r['return']>0:
+ print (r['error'])
+
+ ```
+
+
+=== "Docker"
+ ##### Run this script via Docker (beta)
+
+ ```bash
+ cm docker script "get zephyr"
+ ```
+___
+
+#### Versions
+Default version: `v2.7`
+
+* `v2.7`
+
+#### Native script being run
+=== "Linux/macOS"
+ * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/run-ubuntu.sh)
+ * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/run.sh)
+=== "Windows"
+
+ No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get zephyr " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md
new file mode 100644
index 0000000000..1ac94a64ac
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md
@@ -0,0 +1,6 @@
+* [create-fpgaconvnet-app-tinyml](create-fpgaconvnet-app-tinyml/index.md)
+* [create-fpgaconvnet-config-tinyml](create-fpgaconvnet-config-tinyml/index.md)
+* [flash-tinyml-binary](flash-tinyml-binary/index.md)
+* [get-microtvm](get-microtvm/index.md)
+* [get-zephyr](get-zephyr/index.md)
+* [get-zephyr-sdk](get-zephyr-sdk/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/index.md
new file mode 100644
index 0000000000..cc29ffc3e8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/index.md
@@ -0,0 +1,30 @@
+* [AI-ML-datasets](AI-ML-datasets)
+* [AI-ML-frameworks](AI-ML-frameworks)
+* [AI-ML-models](AI-ML-models)
+* [AI-ML-optimization](AI-ML-optimization)
+* [Cloud-automation](Cloud-automation)
+* [CM-automation](CM-automation)
+* [CM-Interface](CM-Interface)
+* [CM-interface-prototyping](CM-interface-prototyping)
+* [Collective-benchmarking](Collective-benchmarking)
+* [Compiler-automation](Compiler-automation)
+* [CUDA-automation](CUDA-automation)
+* [Dashboard-automation](Dashboard-automation)
+* [Detection-or-installation-of-tools-and-artifacts](Detection-or-installation-of-tools-and-artifacts)
+* [DevOps-automation](DevOps-automation)
+* [Docker-automation](Docker-automation)
+* [GUI](GUI)
+* [Legacy-CK-support](Legacy-CK-support)
+* [MLPerf-benchmark-support](MLPerf-benchmark-support)
+* [Modular-AI-ML-application-pipeline](Modular-AI-ML-application-pipeline)
+* [Modular-application-pipeline](Modular-application-pipeline)
+* [Modular-MLPerf-benchmarks](Modular-MLPerf-benchmarks)
+* [Modular-MLPerf-inference-benchmark-pipeline](Modular-MLPerf-inference-benchmark-pipeline)
+* [Modular-MLPerf-training-benchmark-pipeline](Modular-MLPerf-training-benchmark-pipeline)
+* [Platform-information](Platform-information)
+* [Python-automation](Python-automation)
+* [Remote-automation](Remote-automation)
+* [Reproduce-MLPerf-benchmarks](Reproduce-MLPerf-benchmarks)
+* [Reproducibility-and-artifact-evaluation](Reproducibility-and-artifact-evaluation)
+* [Tests](Tests)
+* [TinyML-automation](TinyML-automation)
diff --git a/cmx4mlops/cmx4mlops/repo/mkdocs.yml b/cmx4mlops/cmx4mlops/repo/mkdocs.yml
new file mode 100644
index 0000000000..4cb2956917
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/mkdocs.yml
@@ -0,0 +1,77 @@
+site_name: CM Script Automation Documentation
+repo_url: https://github.com/mlcommons/cm4mlops
+theme:
+ name: material
+ logo: img/logo_v2.svg
+ favicon: img/logo_v2.svg
+ palette:
+ primary: deep purple
+ accent: green
+ features:
+ - content.tabs.link
+ - content.code.copy
+ - navigation.expand
+ - navigation.sections
+ - navigation.indexes
+ - navigation.instant
+ - navigation.tabs
+ - navigation.tabs.sticky
+ - navigation.top
+ - toc.follow
+nav:
+ - HOME: index.md
+ - Getting Started: getting-started.md
+ - CM Scripts:
+ - scripts/index.md
+ - Python-automation: scripts/Python-automation/index.md
+ - MLPerf-benchmark-support: scripts/MLPerf-benchmark-support/index.md
+ - Modular-AI-ML-application-pipeline: scripts/Modular-AI-ML-application-pipeline/index.md
+ - Modular-application-pipeline: scripts/Modular-application-pipeline/index.md
+ - Modular-MLPerf-inference-benchmark-pipeline: scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md
+ - Modular-MLPerf-benchmarks: scripts/Modular-MLPerf-benchmarks/index.md
+ - Reproduce-MLPerf-benchmarks: scripts/Reproduce-MLPerf-benchmarks/index.md
+ - Modular-MLPerf-training-benchmark-pipeline: scripts/Modular-MLPerf-training-benchmark-pipeline/index.md
+ - DevOps-automation: scripts/DevOps-automation/index.md
+ - Docker-automation: scripts/Docker-automation/index.md
+ - AI-ML-optimization: scripts/AI-ML-optimization/index.md
+ - AI-ML-models: scripts/AI-ML-models/index.md
+ - CM-automation: scripts/CM-automation/index.md
+ - TinyML-automation: scripts/TinyML-automation/index.md
+ - Cloud-automation: scripts/Cloud-automation/index.md
+ - Platform-information: scripts/Platform-information/index.md
+ - Detection-or-installation-of-tools-and-artifacts: scripts/Detection-or-installation-of-tools-and-artifacts/index.md
+ - Compiler-automation: scripts/Compiler-automation/index.md
+ - CM-Interface: scripts/CM-Interface/index.md
+ - Legacy-CK-support: scripts/Legacy-CK-support/index.md
+ - AI-ML-datasets: scripts/AI-ML-datasets/index.md
+ - CUDA-automation: scripts/CUDA-automation/index.md
+ - AI-ML-frameworks: scripts/AI-ML-frameworks/index.md
+ - Reproducibility-and-artifact-evaluation: scripts/Reproducibility-and-artifact-evaluation/index.md
+ - GUI: scripts/GUI/index.md
+ - Collective-benchmarking: scripts/Collective-benchmarking/index.md
+ - Tests: scripts/Tests/index.md
+ - Dashboard-automation: scripts/Dashboard-automation/index.md
+ - Remote-automation: scripts/Remote-automation/index.md
+ - CM-interface-prototyping: scripts/CM-interface-prototyping/index.md
+
+markdown_extensions:
+ - pymdownx.tasklist:
+ custom_checkbox: true
+ - pymdownx.details
+ - admonition
+ - attr_list
+ - def_list
+ - footnotes
+ - pymdownx.superfences:
+ custom_fences:
+ - name: mermaid
+ class: mermaid
+ format: !!python/name:pymdownx.superfences.fence_code_format
+ - pymdownx.tabbed:
+ alternate_style: true
+ - toc:
+ slugify: !!python/object/apply:pymdownx.slugs.slugify {kwds: {case: lower}}
+plugins:
+ - search
+ - macros
+ - caseinsensitivefiles
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md
new file mode 100644
index 0000000000..9d4b696949
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md
@@ -0,0 +1,93 @@
+On this page, we highlight some of the exciting submissions made by CTuning in the MLCommons Inference v3.1 round.
+
+## Top Results in Edge Category
+
+In the edge category, the Rigel Supercomputer from One Stop Systems achieved the peak offline performance for the four submitted benchmarks: image classification (ResNet50), object detection (RetinaNet), language processing (Bert), and speech recognition (RNNT). The graph below compares the peak performance of the bert-99 model across the top 10 performing systems.
+
+
+
+
+The Nvidia RTX 4090 has the best performance per accelerator; this accelerator was assembled in a PC made by PCSPECIALIST UK. The graph below compares the per-accelerator performance of the bert-99 model across the top 10 performing systems.
+
+
+
+
+The Nvidia RTX 4090 also wins the latency metric for ResNet50, Bert, and 3d-unet in the SingleStream scenario.
+
+
+
+
+
+
+
+## Best energy efficient results in Edge category
+
+For the speech recognition model (RNNT), CTuning submitted the most power-efficient result on the Nvidia Jetson Orin AGX.
+
+
+
+
+For the medical imaging model (3d-unet), where the samples per second are quite low, the four most energy-efficient results are from CTuning.
+
+
+
+For the language processing model (bert-99), the Gloria high-end system from Qualcomm tops the energy-efficiency metric, while CTuning's Nvidia Jetson Orin AGX takes second place.
+
+
+
+
+
+## Benchmarking Rigel Supercomputer
+
+The Rigel Edge Supercomputer from One Stop Systems achieves the peak performance for all four submitted models and comfortably beats the second-place system. It also achieves the best latency in the ResNet50 MultiStream scenario.
+
+
+
+
+
+
+
+
+
+
+## Benchmarking MLPerf Inference Reference Implementations
+
+We compared the performance of the reference implementation with that of the Nvidia-optimized implementation by running both on an Nvidia RTX 4090 GPU. The reference implementation uses fp32 models, whereas the Nvidia implementation uses quantized models.
+
+
+
+
+
+
+
+## Showcasing Apple Metal Performance
+
+We benchmarked Apple Metal performance using tensorflow-metal. The graphs below show the performance benefit of running inference on Apple Metal via tensorflow-metal versus onnxruntime running only on CPUs.
+
+
+
+
+
+
+
+
+
+
+
+## Design Space Exploration For NeuralMagic Deepsparse Library
+
+Using the CM experiment automation, we performed a design-space exploration to find the optimal batch size for the bert-99-compatible sparse models; a simplified sketch of such a sweep is shown below.
+
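+A hedged sketch of such a sweep using plain bash around the CM MLPerf run command (the env key and batch-size values below are illustrative, not the exact commands used for the submission):
+
+```bash
+for batch_size in 1 2 4 8 16 32 64 128; do
+  cmr "run mlperf inference generate-run-cmds _submission _short" \
+      --model=bert-99 --backend=deepsparse --device=cpu --scenario=Offline \
+      --env.CM_MLPERF_LOADGEN_MAX_BATCHSIZE=${batch_size} --quiet
+done
+```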
+
+
+
+
+
+## Comparing the performance of Modular MLPerf Inference C++ implementations
+
+Here we compare the performance of the MIL library used by CTuning and the KILT library used by KRAI, both on CPUs and GPUs. This is not an apples-to-apples comparison, as KILT used an Nvidia A1000 GPU while MIL was run on an Nvidia RTX 4090 GPU. For CPUs, KILT was run on a [24-core Dell server](https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Krai/systems/7920t-kilt-onnxruntime_cpu.json) with a peak frequency of 4000 MHz, whereas MIL was run on a [16-core PCSPECIALIST custom workstation](https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/CTuning/systems/amd_ryzen_workstation-cpp-cpu-onnxruntime-vdefault-default_config.json) with a peak frequency of 5900 MHz.
+
+
+
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json
new file mode 100644
index 0000000000..4860af17b2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json
@@ -0,0 +1,16 @@
+{
+ "alias": "mlperf-inference-v3.1-analysis-ctuning",
+ "automation_alias": "report",
+ "automation_uid": "6462ecdba2054467",
+ "date":"20230917",
+ "title":"cTuning's analysis of MLPerf inference v3.1 community results",
+ "tags": [
+ "mlperf",
+ "inference",
+ "mlperf-inference",
+ "v3.1",
+ "analysis",
+ "ctuning"
+ ],
+ "uid": "ebc483653dbc45b6"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json
new file mode 100644
index 0000000000..99d0370a50
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json
@@ -0,0 +1,17 @@
+{
+ "alias": "mlperf-inference-v3.1-press-release-ctuning",
+ "automation_alias": "report",
+ "automation_uid": "6462ecdba2054467",
+ "date": "20230913",
+ "redirect": "https://www.linkedin.com/pulse/new-milestone-make-mlperf-benchmarks-accessible-everyone-fursin",
+ "tags": [
+ "mlperf",
+ "inference",
+ "mlperf-inference",
+ "v3.1",
+ "analysis",
+ "ctuning"
+ ],
+ "title": "cTuning press-release about making MLPerf inference accessible to everyone",
+ "uid": "85ff4a6ac203411e"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json
new file mode 100644
index 0000000000..159a986735
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json
@@ -0,0 +1,17 @@
+{
+ "alias": "mlperf-inference-v3.1-press-release-hpcwire",
+ "automation_alias": "report",
+ "automation_uid": "6462ecdba2054467",
+ "date": "20230913",
+ "tags": [
+ "mlperf",
+ "inference",
+ "mlperf-inference",
+ "v3.1",
+ "analysis",
+ "ctuning"
+ ],
+ "redirect": "https://www.hpcwire.com/2023/09/13/mlperf-releases-latest-inference-results-and-new-storage-benchmark",
+ "title": "HPCWire about MLPerf inference v3.1 and storage results (with cTuning/cKnowledge coverage)",
+ "uid": "50960565640142d6"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json
new file mode 100644
index 0000000000..15c3fa6c42
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json
@@ -0,0 +1,17 @@
+{
+ "alias": "mlperf-inference-v4.0-press-release-ctuning",
+ "automation_alias": "report",
+ "automation_uid": "6462ecdba2054467",
+ "date": "20230913",
+ "redirect": "https://www.linkedin.com/pulse/new-cm-mlperf-automation-helps-benchmark-commodity-hardware-fursin-61noe",
+ "tags": [
+ "mlperf",
+ "inference",
+ "mlperf-inference",
+ "v4.0",
+ "analysis",
+ "ctuning"
+ ],
+ "title": "cTuning press-release about a new version of the CM workflow to automate MLPerf",
+ "uid": "acc35b8e9ed14c98"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/requirements.txt b/cmx4mlops/cmx4mlops/repo/requirements.txt
new file mode 100644
index 0000000000..b1eac3c174
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/requirements.txt
@@ -0,0 +1,6 @@
+cmind>=2.0.1
+pyyaml
+requests
+setuptools
+giturlparse
+tabulate
diff --git a/cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/README.md b/cmx4mlops/cmx4mlops/repo/script/README.md
new file mode 100644
index 0000000000..d2667369c0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/README.md
@@ -0,0 +1,40 @@
+## About
+
+Portable CM automations for MLOps and MLPerf.
+
+## License
+
+[Apache 2.0](../../LICENSE.md)
+
+## Copyright
+
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
+
+This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at:
+
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License.
+
+## Maintainer(s)
+
+* MLCommons
+
+## CM author
+
+[Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## CM script developers
+
+Arjun Suresh, Anandhu Sooraj, Grigori Fursin
+
+## Parent project
+
+Visit the [parent Collective Knowledge project](https://github.com/mlcommons/ck) for further details.
+
+## Citing this project
+
+If you found the CM automations helpful, kindly reference this article:
+[ [ArXiv](https://arxiv.org/abs/2406.16791) ]
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md
new file mode 100644
index 0000000000..2b61d193cd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md
@@ -0,0 +1,7 @@
+# About
+
+Activate python virtual environment installed via CM:
+
+```bash
+cm run script "activate python-ven" (--version={python version}) (--name={user friendly name of the virtual environment))
+```
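+
+For example, a virtual environment created with the hypothetical user-friendly name `mlperf` could be activated via:
+
+```bash
+cm run script "activate python-venv" --name=mlperf
+```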
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md
new file mode 100644
index 0000000000..c9c3db32f0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/activate-python-venv](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/activate-python-venv) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml
new file mode 100644
index 0000000000..ed65cf2f1c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml
@@ -0,0 +1,18 @@
+alias: activate-python-venv
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Python automation
+developers: '[Grigori Fursin](https://cKnowledge.org/gfursin)'
+name: Activate virtual Python environment
+prehook_deps:
+- names:
+ - python-venv
+ reuse_version: true
+ tags: install,python-venv
+tags:
+- activate
+- python
+- activate-python-venv
+- python-venv
+tags_help: activate python-venv
+uid: fcbbb84946f34c55
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py
new file mode 100644
index 0000000000..c22b25b65a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py
@@ -0,0 +1,42 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ meta = i['meta']
+
+ automation = i['automation']
+
+ quiet = (env.get('CM_QUIET', False) == 'yes')
+
+ name = env.get('CM_NAME', '')
+ if name != '':
+ name = name.strip().lower()
+
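+    # Propagate the user-provided name (CM_NAME) to the 'python-venv'
+    # dependency declared in prehook_deps so that the matching virtual
+    # environment is installed and activated.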
+ r = automation.update_deps({'deps': meta['prehook_deps'],
+ 'update_deps': {
+ 'python-venv': {
+ 'name': name
+ }
+ }
+ })
+ if r['return'] > 0:
+ return r
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat
new file mode 100644
index 0000000000..5ca2ac0edd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat
@@ -0,0 +1,7 @@
+echo.
+echo call "%CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd"
+echo.
+echo Enter exit to exit virtual env.
+echo.
+
+call %CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh
new file mode 100644
index 0000000000..6569b07e55
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
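+# Start an interactive shell with the virtual environment activated;
+# type 'exit' to return to the parent shell.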
+
+echo ""
+echo " bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate"
+echo ""
+echo " Enter exit to exit virtual env."
+echo ""
+
+bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md
new file mode 100644
index 0000000000..baa487880e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md
@@ -0,0 +1,2 @@
+# About
+This CM script detects the system details using an Nvidia script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md
new file mode 100644
index 0000000000..f107355818
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/add-custom-nvidia-system](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/add-custom-nvidia-system) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml
new file mode 100644
index 0000000000..6dce8414db
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml
@@ -0,0 +1,126 @@
+# Identification of this CM script
+alias: add-custom-nvidia-system
+uid: b2e6c46c6e8745a3
+cache: true
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "MLPerf benchmark support"
+docker:
+ real_run: False
+
+# User-friendly tags to find this CM script
+tags:
+ - add
+ - custom
+ - system
+ - nvidia
+
+
+# Dependencies on other CM scripts
+
+deps:
+
+ # Detect host OS features
+ - tags: detect,os
+
+ # Detect host CPU features
+ - tags: detect,cpu
+
+ # Install system dependencies on a given host
+ - tags: get,sys-utils-cm
+
+ # Detect python3
+ - tags: get,python3
+ names:
+ - python
+ - python3
+
+ # Detect CUDA
+ - tags: get,cuda,_cudnn
+
+ # Detect Tensorrt
+ - tags: get,tensorrt
+
+ # Detect CMake
+ - tags: get,cmake
+
+ # Detect requests
+ - tags: get,generic-python-lib,_requests
+
+ # Detect Google Logger
+ - tags: get,generic,sys-util,_glog-dev
+
+ # Detect GFlags
+ - tags: get,generic,sys-util,_gflags-dev
+
+ # Detect libre2-dev
+ - tags: get,generic,sys-util,_libre2-dev
+
+ # Detect libnuma-dev
+ - tags: get,generic,sys-util,_libnuma-dev
+
+ # Detect libboost-all-dev
+ - tags: get,generic,sys-util,_libboost-all-dev
+
+ # Detect rapidjson-dev
+ - tags: get,generic,sys-util,_rapidjson-dev
+
+ # Download Nvidia Submission Code
+ - tags: get,nvidia,mlperf,inference,common-code
+ names:
+ - nvidia-inference-common-code
+
+ # Detect pycuda
+ - tags: get,generic-python-lib,_pycuda
+
+variations:
+ nvidia-only:
+ group: code
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ tags: _nvidia-only
+ custom:
+ group: code
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ tags: _custom
+ mlcommons:
+ group: code
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ tags: _mlcommons
+ ctuning:
+ group: code
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ tags: _ctuning
+ go:
+ group: code
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ tags: _go
+
+
+
+
+versions:
+ r2.1:
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ version: r2.1
+
+ r3.0:
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ version: r3.0
+
+ r3.1:
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ version: r3.1
+
+ r4.0:
+ add_deps_recursive:
+ nvidia-inference-common-code:
+ version: r4.0
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py
new file mode 100644
index 0000000000..016d9cdcd4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py
@@ -0,0 +1,35 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ if os_info['platform'] == 'windows':
+ return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+
+ env = i['env']
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ env = i['env']
+
+ env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh
new file mode 100644
index 0000000000..b89617f7f2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
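+
+# Run Nvidia's add_custom_system.py from the detected Nvidia MLPerf
+# inference code path, using the Python interpreter detected by CM.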
+CUR=$PWD
+cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}
+${CM_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md
new file mode 100644
index 0000000000..e379e2544e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md
@@ -0,0 +1,17 @@
+# About
+
+See [this tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/modular-image-classification.md).
+
+# Collaborative testing
+
+## Windows 11
+
+* CUDA 11.8; cuDNN 8.7.0; ONNX GPU 1.16.1
+
+## Windows 10
+
+* CUDA 11.6; cuDNN 8.6.0.96; ONNX GPU 1.13.1
+
+## Ubuntu 22.04
+
+* CUDA 11.3; ONNX 1.12.0
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md
new file mode 100644
index 0000000000..1efef82016
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml
new file mode 100644
index 0000000000..740a8a18ab
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml
@@ -0,0 +1,120 @@
+alias: app-image-classification-onnx-py
+uid: 3d5e908e472b417e
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular AI/ML application pipeline"
+
+tags:
+- app
+- modular
+- image-classification
+- onnx
+- python
+
+tags_help: "modular python app image-classification onnx"
+
+default_env:
+ CM_BATCH_COUNT: '1'
+ CM_BATCH_SIZE: '1'
+
+
+deps:
+- tags: detect,os
+#- tags: get,sys-utils-cm
+- names:
+ - python
+ - python3
+ tags: get,python3
+
+- tags: get,cuda
+ names:
+ - cuda
+ enable_if_env:
+ USE_CUDA:
+ - yes
+- tags: get,cudnn
+ names:
+ - cudnn
+ enable_if_env:
+ USE_CUDA:
+ - yes
+
+- tags: get,dataset,imagenet,image-classification,original,_run-during-docker-build
+
+- tags: get,dataset-aux,imagenet-aux,image-classification
+- tags: get,ml-model,resnet50,_onnx,image-classification
+ names:
+ - ml-model
+
+- tags: get,generic-python-lib,_package.Pillow
+- tags: get,generic-python-lib,_package.numpy
+ version_max: "1.99.99"
+- tags: get,generic-python-lib,_package.opencv-python
+
+
+- tags: get,generic-python-lib,_onnxruntime
+ names:
+ - onnxruntime
+ skip_if_env:
+ USE_CUDA:
+ - yes
+- tags: get,generic-python-lib,_onnxruntime_gpu
+ names:
+ - onnxruntime
+ enable_if_env:
+ USE_CUDA:
+ - yes
+
+variations:
+ cuda:
+ docker:
+ all_gpus: 'yes'
+ group: target
+ env:
+ USE_CUDA: yes
+
+ cpu:
+ group: target
+ default: yes
+ env:
+ USE_CPU: yes
+
+input_mapping:
+ input: CM_IMAGE
+ output: CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT
+
+
+new_env_keys:
+ - CM_APP_IMAGE_CLASSIFICATION_ONNX_PY*
+
+
+new_state_keys:
+ - cm_app_image_classification_onnx_py
+
+
+input_description:
+ input:
+ desc: "Path to JPEG image to classify"
+ output:
+ desc: "Output directory (optional)"
+ j:
+ desc: "Print JSON output"
+ boolean: true
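+
+# Example invocation using tags_help above (the image path is illustrative):
+#   cm run script "modular python app image-classification onnx" --input=computer_mouse.jpg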
+
+docker:
+ skip_run_cmd: 'no'
+ skip_cm_sys_upgrade: 'yes'
+ cm_repo_flags: '--branch=dev'
+ use_host_group_id: 'yes'
+ image_tag_extra: '-cm-dev'
+ input_paths:
+ - input
+ - env.CM_IMAGE
+ - output
+ skip_input_for_fake_run:
+ - input
+ - env.CM_IMAGE
+ - output
+ - j
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py
new file mode 100644
index 0000000000..0ca34fc952
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py
@@ -0,0 +1,77 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+ env = i['env']
+
+# print ('')
+# print ('Running preprocess function in customize.py ...')
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ os_info = i['os_info']
+ env = i['env']
+ state = i['state']
+
+
+# print ('')
+# print ('Running postprocess function in customize.py ...')
+
+ # Saving predictions to JSON file to current directory
+ # Should work with "cm docker script" ?
+
+ data = state.get('cm_app_image_classification_onnx_py', {})
+
+ fjson = 'cm-image-classification-onnx-py.json'
+ fyaml = 'cm-image-classification-onnx-py.yaml'
+
+ output = env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '')
+ if output != '':
+ if not os.path.exists(output):
+ os.makedirs(output)
+
+ fjson = os.path.join(output, fjson)
+ fyaml = os.path.join(output, fyaml)
+
+ try:
+ import json
+ with open(fjson, 'w', encoding='utf-8') as f:
+ json.dump(data, f, ensure_ascii=False, indent=4)
+ except Exception as e:
+ print('CM warning: {}'.format(e))
+
+ try:
+ import yaml
+ with open(fyaml, 'w', encoding='utf-8') as f:
+ yaml.dump(data, f)
+ except Exception as e:
+ print('CM warning: {}'.format(e))
+
+ top_classification = data.get('top_classification', '')
+
+ if env.get('CM_TMP_SILENT', '') != 'yes':
+ if top_classification != '':
+ print('')
+ x = 'Top classification: {}'.format(top_classification)
+ print('=' * len(x))
+ print(x)
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/img/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/img/computer_mouse.jpg
new file mode 100644
index 0000000000..e7f8abb6fe
Binary files /dev/null and b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/img/computer_mouse.jpg differ
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/requirements.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat
new file mode 100644
index 0000000000..ee7db98674
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat
@@ -0,0 +1,29 @@
+rem echo %CM_PYTHON_BIN%
+rem echo %CM_DATASET_PATH%
+rem echo %CM_DATASET_AUX_PATH%
+rem echo %CM_ML_MODEL_FILE_WITH_PATH%
+
+rem connect CM intelligent components with CK env
+set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%CM_ML_MODEL_FILE_WITH_PATH%
+set CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME=input_tensor:0
+set CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME=softmax_tensor:0
+set CK_ENV_DATASET_IMAGENET_VAL=%CM_DATASET_PATH%
+set CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt
+set ML_MODEL_DATA_LAYOUT=NCHW
+set CK_BATCH_SIZE=%CM_BATCH_SIZE%
+set CK_BATCH_COUNT=%CM_BATCH_COUNT%
+
+IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD%
+
+IF DEFINED CM_INPUT SET CM_IMAGE=%CM_INPUT%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+rem Just a demo to pass environment variables from native scripts back to CM workflows
+echo CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=success > tmp-run-env.out
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh
new file mode 100644
index 0000000000..62b07e1f10
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+if [[ ${CM_RUN_DOCKER_CONTAINER} == "yes" ]]; then
+ exit 0
+fi
+
+#echo ${CM_PYTHON_BIN}
+#echo ${CM_DATASET_PATH}
+#echo ${CM_DATASET_AUX_PATH}
+#echo ${CM_ML_MODEL_FILE_WITH_PATH}
+CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3}
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+
+# connect CM intelligent components with CK env
+export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${CM_ML_MODEL_FILE_WITH_PATH}
+export CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME="input_tensor:0"
+export CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME="softmax_tensor:0"
+export CK_ENV_DATASET_IMAGENET_VAL=${CM_DATASET_PATH}
+export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt
+export ML_MODEL_DATA_LAYOUT="NCHW"
+export CK_BATCH_SIZE=${CM_BATCH_SIZE}
+export CK_BATCH_COUNT=${CM_BATCH_COUNT}
+
+if [[ "${CM_INPUT}" != "" ]]; then export CM_IMAGE=${CM_INPUT}; fi
+
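+# pip >= 23 can refuse to install into an externally managed environment
+# (PEP 668), so detect the pip version and pass --break-system-packages
+# when needed.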
+PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"`
+
+echo ""
+${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA}
+test $? -eq 0 || exit 1
+
+echo ""
+${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py
+test $? -eq 0 || exit 1
+
+# Just a demo to pass environment variables from native scripts back to CM workflows
+echo "CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess" > tmp-run-env.out
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py
new file mode 100644
index 0000000000..c2c5a6ceb0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+
+# Extended by Grigori Fursin to support the MLCommons CM workflow
+# automation language
+
+import os
+import onnxruntime as rt
+import numpy as np
+import time
+import json
+
+from PIL import Image
+
+model_path = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH']
+input_layer_name = os.environ['CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME']
+output_layer_name = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME']
+normalize_data_bool = os.getenv(
+ 'CK_ENV_ONNX_MODEL_NORMALIZE_DATA', '0') in (
+ 'YES', 'yes', 'ON', 'on', '1')
+subtract_mean_bool = os.getenv(
+ 'CK_ENV_ONNX_MODEL_SUBTRACT_MEAN', '0') in (
+ 'YES', 'yes', 'ON', 'on', '1')
+given_channel_means = os.getenv('ML_MODEL_GIVEN_CHANNEL_MEANS', '')
+if given_channel_means:
+ given_channel_means = np.array(
+ given_channel_means.split(' '),
+ dtype=np.float32)
+
+imagenet_path = os.environ['CK_ENV_DATASET_IMAGENET_VAL']
+labels_path = os.environ['CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+data_layout = os.environ['ML_MODEL_DATA_LAYOUT']
+batch_size = int(os.environ['CK_BATCH_SIZE'])
+batch_count = int(os.environ['CK_BATCH_COUNT'])
+CPU_THREADS = int(os.getenv('CK_HOST_CPU_NUMBER_OF_PROCESSORS', 0))
+
+
+def load_labels(labels_filepath):
+ my_labels = []
+ input_file = open(labels_filepath, 'r')
+ for l in input_file:
+ my_labels.append(l.strip())
+ return my_labels
+
+
+def load_and_resize_image(image_filepath, height, width):
+ # sic! The order of dimensions in resize is (W,H)
+ pillow_img = Image.open(image_filepath).resize((width, height))
+
+ # Grigori fixed below
+ # input_data = np.float32(pillow_img)
+ input_data = np.asarray(pillow_img)
+ input_data = np.asarray(input_data, np.float32)
+
+ # Normalize
+ if normalize_data_bool:
+ input_data = input_data / 127.5 - 1.0
+
+ # Subtract mean value
+ if subtract_mean_bool:
+ if len(given_channel_means):
+ input_data -= given_channel_means
+ else:
+ input_data -= np.mean(input_data)
+
+# print(np.array(pillow_img).shape)
+ nhwc_data = np.expand_dims(input_data, axis=0)
+
+ if data_layout == 'NHWC':
+ # print(nhwc_data.shape)
+ return nhwc_data
+ else:
+ nchw_data = nhwc_data.transpose(0, 3, 1, 2)
+ # print(nchw_data.shape)
+ return nchw_data
+
+
+def load_a_batch(batch_filenames):
+ unconcatenated_batch_data = []
+ for image_filename in batch_filenames:
+ image_filepath = image_filename
+ nchw_data = load_and_resize_image(image_filepath, height, width)
+ unconcatenated_batch_data.append(nchw_data)
+ batch_data = np.concatenate(unconcatenated_batch_data, axis=0)
+
+ return batch_data
+
+
+# print("Device: " + rt.get_device())
+sess_options = rt.SessionOptions()
+
+if CPU_THREADS > 0:
+    # enable_sequential_execution/session_thread_pool_size were removed from
+    # onnxruntime; use the current execution-mode and thread-count options.
+    sess_options.execution_mode = rt.ExecutionMode.ORT_PARALLEL
+    sess_options.intra_op_num_threads = CPU_THREADS
+
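+# Prefer the CUDA execution provider when it is available and USE_CUDA is
+# not explicitly disabled; otherwise fall back to the CPU provider.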
+if 'CUDAExecutionProvider' in rt.get_available_providers() and os.environ.get(
+        "USE_CUDA", "yes").lower() not in ["0", "false", "off", "no"]:
+ # Currently considering only CUDAExecutionProvider
+ sess = rt.InferenceSession(
+ model_path,
+ sess_options,
+ providers=['CUDAExecutionProvider'])
+else:
+ sess = rt.InferenceSession(
+ model_path,
+ sess_options,
+ providers=["CPUExecutionProvider"])
+
+# FIXME: check that input_layer_name belongs to this list
+input_layer_names = [x.name for x in sess.get_inputs()]
+input_layer_name = input_layer_name or input_layer_names[0]
+
+# FIXME: check that output_layer_name belongs to this list
+output_layer_names = [x.name for x in sess.get_outputs()]
+output_layer_name = output_layer_name or output_layer_names[0]
+
+model_input_shape = sess.get_inputs()[0].shape
+model_classes = sess.get_outputs()[1].shape[1]
+labels = load_labels(labels_path)
+# An offset of 1 means the model predicts an extra background class 0 that
+# has to be skipped, with the labels representing classes 1..1000
+bg_class_offset = model_classes - len(labels)
+
+if data_layout == 'NHWC':
+ (samples, height, width, channels) = model_input_shape
+else:
+ (samples, channels, height, width) = model_input_shape
+
+print("")
+print("Data layout: {}".format(data_layout))
+print("Input layers: {}".format([str(x) for x in sess.get_inputs()]))
+print("Output layers: {}".format([str(x) for x in sess.get_outputs()]))
+print("Input layer name: " + input_layer_name)
+print("Expected input shape: {}".format(model_input_shape))
+print("Output layer name: " + output_layer_name)
+print("Data normalization: {}".format(normalize_data_bool))
+print("Subtract mean: {}".format(subtract_mean_bool))
+print('Per-channel means to subtract: {}'.format(given_channel_means))
+print("Background/unlabelled classes to skip: {}".format(bg_class_offset))
+print("")
+
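+# ImageNet validation filenames are 1-indexed (ILSVRC2012_val_00000001.JPEG, ...).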
+starting_index = 1
+
+start_time = time.time()
+
+for batch_idx in range(batch_count):
+ print('')
+ print("Batch {}/{}:".format(batch_idx + 1, batch_count))
+
+ batch_filenames = [
+ imagenet_path +
+ '/' +
+ "ILSVRC2012_val_00000{:03d}.JPEG".format(
+ starting_index +
+ batch_idx *
+ batch_size +
+ i) for i in range(batch_size)]
+
+ # Grigori: trick to test models:
+ if os.environ.get('CM_IMAGE', '') != '':
+ batch_filenames = [os.environ['CM_IMAGE']]
+
+ batch_data = load_a_batch(batch_filenames)
+ # print(batch_data.shape)
+
+ batch_predictions = sess.run(
+ [output_layer_name], {
+ input_layer_name: batch_data})[0]
+
+ cm_status = {'classifications': []}
+
+ print('')
+ top_classification = ''
+ for in_batch_idx in range(batch_size):
+ # skipping the background class on the left (if present)
+ softmax_vector = batch_predictions[in_batch_idx][bg_class_offset:]
+ top5_indices = list(reversed(softmax_vector.argsort()))[:5]
+
+ print(' * ' + batch_filenames[in_batch_idx] + ' :')
+
+ for class_idx in top5_indices:
+ if top_classification == '':
+ top_classification = labels[class_idx]
+
+ print(
+ "\t{}\t{}\t{}".format(
+ class_idx,
+ softmax_vector[class_idx],
+ labels[class_idx]))
+
+ cm_status['classifications'].append({'class_idx': int(class_idx),
+ 'softmax': float(softmax_vector[class_idx]),
+ 'label': labels[class_idx]})
+
+ print('')
+ print('Top classification: {}'.format(top_classification))
+ cm_status['top_classification'] = top_classification
+
+avg_time = (time.time() - start_time) / batch_count
+cm_status['avg_time'] = avg_time
+
+# Record cm_status so it can be embedded into CM workflows
+with open('tmp-run-state.json', 'w') as cm_file:
+ cm_file.write(json.dumps(
+ {'cm_app_image_classification_onnx_py': cm_status}, sort_keys=True, indent=2))
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md
new file mode 100644
index 0000000000..899509cb7f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md
@@ -0,0 +1,14 @@
+```bash
+docker system prune -a -f
+
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.CM_IMAGE=computer_mouse.jpg
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg
+
+cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg -j --docker_it
+
+cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg --output=.
+
+
+```
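+
+Note: `cmr` is shorthand for `cm run script`, and `docker system prune -a -f`
+removes all unused local Docker images and caches, so use it with care.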
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md
new file mode 100644
index 0000000000..5e59c8fede
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md
@@ -0,0 +1,3 @@
+# Image Classification App in C++ for ResNet50 model
+
+* Under development; not yet complete
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md
new file mode 100644
index 0000000000..7f5dcacbd2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml
new file mode 100644
index 0000000000..c7ee8b5601
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml
@@ -0,0 +1,27 @@
+alias: app-image-classification-tf-onnx-cpp
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Modular AI/ML application pipeline
+default_env:
+ CM_BATCH_COUNT: '1'
+ CM_BATCH_SIZE: '1'
+deps:
+- tags: detect,os
+- tags: get,sys-utils-cm
+- tags: get,gcc
+- tags: get,dataset,image-classification,original
+- tags: get,dataset-aux,image-classification
+- tags: get,ml-model,raw,image-classification,resnet50,_onnx,_opset-11
+- tags: tensorflow,from-src
+ version: v2.0.0
+tags:
+- app
+- image-classification
+- tf
+- tensorflow
+- tf-onnx
+- tensorflow-onnx
+- onnx
+- cpp
+tags_help: app image-classification cpp tensorflow onnx
+uid: 879ed32e47074033
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
new file mode 100644
index 0000000000..42b0418fce
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+#pragma once
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <chrono>
+#include <dirent.h>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <string.h>
+#include <thread>
+#include <vector>
+
+//#include <xopenme.h>
+
+#define DEBUG(msg) std::cout << "DEBUG: " << msg << std::endl;
+
+namespace CK {
+
+enum _TIMERS {
+ X_TIMER_SETUP,
+ X_TIMER_TEST,
+
+ X_TIMER_COUNT
+};
+
+enum _VARS {
+ X_VAR_TIME_SETUP,
+ X_VAR_TIME_TEST,
+ X_VAR_TIME_IMG_LOAD_TOTAL,
+ X_VAR_TIME_IMG_LOAD_AVG,
+ X_VAR_TIME_CLASSIFY_TOTAL,
+ X_VAR_TIME_CLASSIFY_AVG,
+
+ X_VAR_COUNT
+};
+
+enum MODEL_TYPE {
+ LITE,
+ TF_FROZEN
+};
+
+/// Store named value into xopenme variable.
+inline void store_value_f(int index, const char* name, float value) {
+ char* json_name = new char[strlen(name) + 6];
+ sprintf(json_name, "\"%s\":%%f", name);
+ //xopenme_add_var_f(index, json_name, value);
+ delete[] json_name;
+}
+
+/// Load mandatory string value from the environment.
+inline std::string getenv_s(const std::string& name) {
+ const char *value = getenv(name.c_str());
+ if (!value)
+ throw "Required environment variable " + name + " is not set";
+ return std::string(value);
+}
+
+/// Load mandatory integer value from the environment.
+inline int getenv_i(const std::string& name) {
+ const char *value = getenv(name.c_str());
+ if (!value)
+ throw "Required environment variable " + name + " is not set";
+ return atoi(value);
+}
+
+/// Load mandatory float value from the environment.
+inline float getenv_f(const std::string& name) {
+ const char *value = getenv(name.c_str());
+ if (!value)
+ throw "Required environment variable " + name + " is not set";
+ return atof(value);
+}
+
+/// Dummy `sprintf`-like formatting function using std::string.
+/// It uses a fixed-length buffer, so it can't be used in all cases;
+/// in general, use it only for short messages with numeric arguments.
+template <typename ...Args>
+inline std::string format(const char* str, Args ...args) {
+ char buf[1024];
+ sprintf(buf, str, args...);
+ return std::string(buf);
+}
+
+//----------------------------------------------------------------------
+
+class Accumulator {
+public:
+ void reset() { _total = 0, _count = 0; }
+ void add(float value) { _total += value, _count++; }
+ float total() const { return _total; }
+ float avg() const { return _total / static_cast<float>(_count); }
+private:
+ float _total = 0;
+ int _count = 0;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSettings {
+public:
+ const std::string images_dir = getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DIR");
+ const std::string images_file = getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_SUBSET_FOF");
+ const bool skip_internal_preprocessing = getenv("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE")
+ && ( getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE") == "float32" );
+
+ const std::string result_dir = getenv_s("CK_RESULTS_DIR");
+ const std::string input_layer_name = getenv_s("CK_ENV_TENSORFLOW_MODEL_INPUT_LAYER_NAME");
+ const std::string output_layer_name = getenv_s("CK_ENV_TENSORFLOW_MODEL_OUTPUT_LAYER_NAME");
+ const int batch_count = getenv_i("CK_BATCH_COUNT");
+ const int batch_size = getenv_i("CK_BATCH_SIZE");
+ const int image_size = getenv_i("CK_ENV_DATASET_IMAGENET_PREPROCESSED_INPUT_SQUARE_SIDE");
+ const int num_channels = 3;
+ const int num_classes = 1000;
+ const bool normalize_img = getenv_s("CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") == "YES";
+ const bool subtract_mean = getenv_s("CK_ENV_TENSORFLOW_MODEL_SUBTRACT_MEAN") == "YES";
+ const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS");
+
+ const bool full_report = getenv_i("CK_SILENT_MODE") == 0;
+
+ BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) {
+
+ if(given_channel_means_str) {
+ std::stringstream ss(given_channel_means_str);
+ for(int i=0;i<3;i++){
+ ss >> given_channel_means[i];
+ }
+ }
+
+ switch (mode)
+ {
+ case MODEL_TYPE::LITE:
+ _graph_file = getenv_s("CK_ENV_TENSORFLOW_MODEL_TFLITE_FILEPATH");
+ break;
+
+ case MODEL_TYPE::TF_FROZEN:
+ _graph_file = getenv_s("CK_ENV_TENSORFLOW_MODEL_TF_FROZEN_FILEPATH");
+ break;
+
+ default:
+ std::cout << "Unsupported MODEL_TYPE" << std::endl;
+ exit(1);
+ break;
+ };
+ _number_of_threads = std::thread::hardware_concurrency();
+ _number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads;
+ _number_of_threads = !getenv("CK_HOST_CPU_NUMBER_OF_PROCESSORS")
+ ? _number_of_threads
+ : getenv_i("CK_HOST_CPU_NUMBER_OF_PROCESSORS");
+
+ // Print settings
+ std::cout << "Graph file: " << _graph_file << std::endl;
+ std::cout << "Image dir: " << images_dir << std::endl;
+ std::cout << "Image list: " << images_file << std::endl;
+ std::cout << "Image size: " << image_size << std::endl;
+ std::cout << "Image channels: " << num_channels << std::endl;
+ std::cout << "Prediction classes: " << num_classes << std::endl;
+ std::cout << "Result dir: " << result_dir << std::endl;
+ std::cout << "Batch count: " << batch_count << std::endl;
+ std::cout << "Batch size: " << batch_size << std::endl;
+ std::cout << "Normalize: " << normalize_img << std::endl;
+ std::cout << "Subtract mean: " << subtract_mean << std::endl;
+ if(subtract_mean && given_channel_means_str)
+ std::cout << "Per-channel means to subtract: " << given_channel_means[0]
+ << ", " << given_channel_means[1]
+ << ", " << given_channel_means[2] << std::endl;
+
+ // Create results dir if none
+ auto dir = opendir(result_dir.c_str());
+ if (dir)
+ closedir(dir);
+ else
+ system(("mkdir " + result_dir).c_str());
+
+ // Load list of images to be processed
+ std::ifstream file(images_file);
+ if (!file)
+ throw "Unable to open image list file " + images_file;
+ for (std::string s; !getline(file, s).fail();)
+ _image_list.emplace_back(s);
+ std::cout << "Image count in file: " << _image_list.size() << std::endl;
+ }
+
+ const std::vector<std::string>& image_list() const { return _image_list; }
+
+ std::vector<std::string> _image_list;
+
+ int number_of_threads() { return _number_of_threads; }
+
+ std::string graph_file() { return _graph_file; }
+
+ float given_channel_means[3];
+private:
+ int _number_of_threads;
+ std::string _graph_file;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSession {
+public:
+ BenchmarkSession(const BenchmarkSettings* settings): _settings(settings) {
+ }
+
+ virtual ~BenchmarkSession() {}
+
+ float total_load_images_time() const { return _loading_time.total(); }
+ float total_prediction_time() const { return _total_prediction_time; }
+ float avg_load_images_time() const { return _loading_time.avg(); }
+ float avg_prediction_time() const { return _prediction_time.avg(); }
+
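+ /// Advance to the next batch; returns false once batch_count batches
+ /// have been served.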
+ bool get_next_batch() {
+ if (_batch_index+1 == _settings->batch_count)
+ return false;
+ _batch_index++;
+ int batch_number = _batch_index+1;
+ if (_settings->full_report || batch_number%10 == 0)
+ std::cout << "\nBatch " << batch_number << " of " << _settings->batch_count << std::endl;
+ int begin = _batch_index * _settings->batch_size;
+ int end = (_batch_index + 1) * _settings->batch_size;
+ int images_count = _settings->image_list().size();
+ if (begin >= images_count || end > images_count)
+ throw format("Not enough images to populate batch %d", _batch_index);
+ _batch_files.clear();
+ for (int i = begin; i < end; i++)
+ _batch_files.emplace_back(_settings->image_list()[i]);
+ return true;
+ }
+
+ /// Begin measuring of new benchmark stage.
+ /// Only one stage can be measured at a time.
+ void measure_begin() {
+ _start_time = std::chrono::high_resolution_clock::now();
+ }
+
+ /// Finish measuring of batch loading stage
+ float measure_end_load_images() {
+ float duration = measure_end();
+ if (_settings->full_report)
+ std::cout << "Batch loaded in " << duration << " s" << std::endl;
+ _loading_time.add(duration);
+ return duration;
+ }
+
+ /// Finish measuring of batch prediction stage
+ float measure_end_prediction() {
+ float duration = measure_end();
+ _total_prediction_time += duration;
+ if (_settings->full_report)
+ std::cout << "Batch classified in " << duration << " s" << std::endl;
+ // Skip the first batch to account for system warm-up
+ if (_batch_index > 0 || _settings->batch_count == 1)
+ _prediction_time.add(duration);
+ return duration;
+ }
+
+ int batch_index() const { return _batch_index; }
+ const std::vector<std::string>& batch_files() const { return _batch_files; }
+
+private:
+ int _batch_index = -1;
+ Accumulator _loading_time;
+ Accumulator _prediction_time;
+ const BenchmarkSettings* _settings;
+ float _total_prediction_time = 0;
+ std::vector<std::string> _batch_files;
+ std::chrono::time_point<std::chrono::high_resolution_clock> _start_time;
+
+ float measure_end() const {
+ auto finish_time = std::chrono::high_resolution_clock::now();
+ std::chrono::duration<float> elapsed = finish_time - _start_time;
+ return static_cast<float>(elapsed.count());
+ }
+};
+
+//----------------------------------------------------------------------
+
+inline void init_benchmark() {
+ //xopenme_init(X_TIMER_COUNT, X_VAR_COUNT);
+}
+
+inline void finish_benchmark(const BenchmarkSession& s) {
+ // Store metrics
+ /* store_value_f(X_VAR_TIME_SETUP, "setup_time_s", xopenme_get_timer(X_TIMER_SETUP));
+ store_value_f(X_VAR_TIME_TEST, "test_time_s", xopenme_get_timer(X_TIMER_TEST));
+ store_value_f(X_VAR_TIME_IMG_LOAD_TOTAL, "images_load_time_total_s", s.total_load_images_time());
+ store_value_f(X_VAR_TIME_IMG_LOAD_AVG, "images_load_time_avg_s", s.avg_load_images_time());
+ store_value_f(X_VAR_TIME_CLASSIFY_TOTAL, "prediction_time_total_s", s.total_prediction_time());
+ store_value_f(X_VAR_TIME_CLASSIFY_AVG, "prediction_time_avg_s", s.avg_prediction_time());
+
+ // Finish xopenmp
+ xopenme_dump_state();
+ xopenme_finish();*/
+}
+
+template <typename L>
+void measure_setup(L &&lambda_function) {
+ //xopenme_clock_start(X_TIMER_SETUP);
+ lambda_function();
+ //xopenme_clock_end(X_TIMER_SETUP);
+}
+
+template <typename L>
+void measure_prediction(L &&lambda_function) {
+ //xopenme_clock_start(X_TIMER_TEST);
+ lambda_function();
+ //xopenme_clock_end(X_TIMER_TEST);
+}
+
+//----------------------------------------------------------------------
+
+template <typename TData>
+class StaticBuffer {
+public:
+ StaticBuffer(int size, const std::string& dir): _size(size), _dir(dir) {
+ _buffer = new TData[size];
+ }
+
+ virtual ~StaticBuffer() {
+ delete[] _buffer;
+ }
+
+ TData* data() const { return _buffer; }
+ int size() const { return _size; }
+
+protected:
+ const int _size;
+ const std::string _dir;
+ TData* _buffer;
+};
+
+//----------------------------------------------------------------------
+
+class ImageData : public StaticBuffer<uint8_t> {
+public:
+ ImageData(const BenchmarkSettings* s): StaticBuffer(
+ s->image_size * s->image_size * s->num_channels * (s->skip_internal_preprocessing ? sizeof(float) : sizeof(uint8_t)),
+ s->images_dir) {}
+
+ void load(const std::string& filename) {
+ auto path = _dir + '/' + filename;
+ std::ifstream file(path, std::ios::in | std::ios::binary);
+ if (!file) throw "Failed to open image data " + path;
+ file.read(reinterpret_cast<char*>(_buffer), _size);
+ }
+};
+
+//----------------------------------------------------------------------
+
+class ResultData : public StaticBuffer<float> {
+public:
+ ResultData(const BenchmarkSettings* s): StaticBuffer(
+ s->num_classes, s->result_dir) {}
+
+ void save(const std::string& filename) {
+ auto path = _dir + '/' + filename + ".txt";
+ std::ofstream file(path);
+ if (!file) throw "Unable to create result file " + path;
+ for (int i = 0; i < _size; i++)
+ file << _buffer[i] << std::endl;
+ }
+};
+
+//----------------------------------------------------------------------
+
+class IBenchmark {
+public:
+ bool has_background_class = false;
+
+ virtual ~IBenchmark() {}
+ virtual void load_images(const std::vector<std::string>& batch_images) = 0;
+ virtual void save_results(const std::vector<std::string>& batch_images) = 0;
+};
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+class Benchmark : public IBenchmark {
+public:
+ Benchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr) {
+ _in_ptr = in_ptr;
+ _out_ptr = out_ptr;
+ _in_data.reset(new ImageData(settings));
+ _out_data.reset(new ResultData(settings));
+ _in_converter.reset(new TInConverter(settings));
+ _out_converter.reset(new TOutConverter(settings));
+ }
+
+ void load_images(const std::vector<std::string>& batch_images) override {
+ int image_offset = 0;
+ for (auto image_file : batch_images) {
+ _in_data->load(image_file);
+ _in_converter->convert(_in_data.get(), _in_ptr + image_offset);
+ image_offset += _in_data->size();
+ }
+ }
+
+ void save_results(const std::vector<std::string>& batch_images) override {
+ int image_offset = 0;
+ int probe_offset = has_background_class ? 1 : 0;
+ for (auto image_file : batch_images) {
+ _out_converter->convert(_out_ptr + image_offset + probe_offset, _out_data.get());
+ _out_data->save(image_file);
+ image_offset += _out_data->size() + probe_offset;
+ }
+ }
+
+private:
+ TData* _in_ptr;
+ TData* _out_ptr;
+ std::unique_ptr<ImageData> _in_data;
+ std::unique_ptr<ResultData> _out_data;
+ std::unique_ptr<TInConverter> _in_converter;
+ std::unique_ptr<TOutConverter> _out_converter;
+};
+
+//----------------------------------------------------------------------
+
+class IinputConverter {
+public:
+ virtual ~IinputConverter() {}
+ virtual void convert(const ImageData* source, void* target) = 0;
+};
+
+//----------------------------------------------------------------------
+
+class InCopy : public IinputConverter {
+public:
+ InCopy(const BenchmarkSettings* s) {}
+
+ void convert(const ImageData* source, void* target) {
+ uint8_t *uint8_target = static_cast<uint8_t*>(target);
+ std::copy(source->data(), source->data() + source->size(), uint8_target);
+ }
+};
+
+//----------------------------------------------------------------------
+
+class InNormalize : public IinputConverter {
+public:
+ InNormalize(const BenchmarkSettings* s):
+ _normalize_img(s->normalize_img),
+ _subtract_mean(s->subtract_mean),
+ _given_channel_means(s->given_channel_means),
+ _num_channels(s->num_channels) {
+ }
+
+ void convert(const ImageData* source, void* target) {
+ // Copy image data to target
+ float *float_target = static_cast<float*>(target);
+ float sum = 0;
+ for (int i = 0; i < source->size(); i++) {
+ float px = source->data()[i];
+ if (_normalize_img)
+ px = (px / 255.0 - 0.5) * 2.0;
+ sum += px;
+ float_target[i] = px;
+ }
+ // Subtract mean value if required
+ if (_subtract_mean) {
+ if(_given_channel_means) {
+ for (int i = 0; i < source->size(); i++)
+ float_target[i] -= _given_channel_means[i % _num_channels]; // assuming NHWC order!
+ } else {
+ float mean = sum / static_cast<float>(source->size());
+ for (int i = 0; i < source->size(); i++)
+ float_target[i] -= mean;
+ }
+ }
+ }
+
+private:
+ const bool _normalize_img;
+ const bool _subtract_mean;
+ const float *_given_channel_means;
+ const int _num_channels;
+};
+
+//----------------------------------------------------------------------
+
+class OutCopy {
+public:
+ OutCopy(const BenchmarkSettings* s) {}
+
+ void convert(const float* source, ResultData* target) const {
+ std::copy(source, source + target->size(), target->data());
+ }
+};
+
+//----------------------------------------------------------------------
+
+class OutDequantize {
+public:
+ OutDequantize(const BenchmarkSettings* s) {}
+
+ void convert(const uint8_t* source, ResultData* target) const {
+ for (int i = 0; i < target->size(); i++)
+ target->data()[i] = source[i] / 255.0;
+ }
+};
+
+} // namespace CK
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh
new file mode 100644
index 0000000000..b4a46853bc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
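+# Compile the classifier against the TensorFlow C++ library (built from source
+# by the 'tensorflow,from-src' dependency in _cm.yaml).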
+${CM_CXX_COMPILER_WITH_PATH} -O3 ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow
+
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp
new file mode 100644
index 0000000000..a9ee5ee50e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+// TODO: this header should be moved to a common location (where?)
+#include "../include/benchmark.h"
+
+#include "tensorflow/core/public/session.h"
+#include "tensorflow/cc/framework/scope.h"
+
+using namespace std;
+using namespace CK;
+using namespace tensorflow;
+
+int main(int argc, char* argv[]) {
+ try {
+ init_benchmark();
+
+ BenchmarkSettings settings(MODEL_TYPE::TF_FROZEN);
+ BenchmarkSession session(&settings);
+ ImageData input_data(&settings);
+ ResultData result_data(&settings);
+ unique_ptr<IinputConverter> input_converter;
+ OutCopy result_converter(&settings);
+ unique_ptr<Session> tf_session;
+ GraphDef graph_def;
+
+ if (settings.skip_internal_preprocessing)
+ input_converter.reset(new InCopy(&settings));
+ else
+ input_converter.reset(new InNormalize(&settings));
+
+ // TODO: this option is for TF mobilenets, but generally it should be derived
+ // from the weights package somehow (supported number of classes in meta?)
+ // TODO: this problem is related to the absence of knowledge about the
+ // required image size for a particular image recognition network package.
+ // TODO: we have to provide a common set of parameters for all image-recognition packages.
+ const bool has_background_class = true;
+
+ cout << "\nLoading graph..." << endl;
+ measure_setup([&]{
+ Status status = ReadBinaryProto(Env::Default(), settings.graph_file(), &graph_def);
+ if (!status.ok())
+ throw "Failed to load graph: " + status.ToString();
+
+ tf_session.reset(NewSession(SessionOptions()));
+
+ status = tf_session->Create(graph_def);
+ if (!status.ok())
+ throw "Failed to create new session: " + status.ToString();
+ });
+
+ cout << "\nProcessing batches..." << endl;
+ measure_prediction([&]{
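+ // The input tensor is NHWC: {batch, height, width, channels}.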
+ Tensor input(DT_FLOAT, TensorShape({settings.batch_size,
+ settings.image_size,
+ settings.image_size,
+ settings.num_channels}));
+ float* input_ptr = input.flat<float>().data();
+ vector<Tensor> outputs;
+
+ while (session.get_next_batch()) {
+ // Load batch
+ session.measure_begin();
+ int image_offset = 0;
+ for (auto image_file : session.batch_files()) {
+ input_data.load(image_file);
+ input_converter->convert(&input_data, input_ptr + image_offset);
+ image_offset += input_data.size();
+ }
+ session.measure_end_load_images();
+
+ // Classify current batch
+ session.measure_begin();
+ Status status = tf_session->Run(
+ {{settings.input_layer_name, input}}, {settings.output_layer_name}, {}, &outputs);
+ if (!status.ok())
+ throw "Running model failed: " + status.ToString();
+ session.measure_end_prediction();
+
+ // Process output tensor
+ auto output_flat = outputs[0].flat<float>();
+ if (output_flat.size() != settings.batch_size * (settings.num_classes + 1))
+ throw format("Output tensor has size of %d, but expected size is %d",
+ output_flat.size(), settings.batch_size * (settings.num_classes + 1));
+ image_offset = 0;
+ int probe_offset = has_background_class ? 1 : 0;
+ for (auto image_file : session.batch_files()) {
+ result_converter.convert(output_flat.data() + image_offset + probe_offset, &result_data);
+ result_data.save(image_file);
+ image_offset += result_data.size() + probe_offset;
+ }
+ }
+ });
+
+ finish_benchmark(session);
+ }
+ catch (const string& error_message) {
+ cerr << "ERROR: " << error_message << endl;
+ return -1;
+ }
+ return 0;
+}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md
new file mode 100644
index 0000000000..6628885061
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md
@@ -0,0 +1,16 @@
+# CPU
+
+## 20240129; Windows 11
+
+```bash
+cmr "get generic-python-lib _package.torch" --version=2.1.1
+cmr "get generic-python-lib _package.torchvision" --version=0.16.2
+```
+
+# CUDA
+
+```bash
+cm run script "install python-venv" --name=test
+cm run script "python app image-classification pytorch _cuda" --adr.python.name=test
+cm run script "python app image-classification pytorch _cuda" --adr.python.name=test --input=src/computer_mouse.jpg
+```
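+
+A CPU-only run should work the same way without the `_cuda` variation
+(a sketch, assuming the same script tags as in the CUDA examples above):
+
+```bash
+cm run script "python app image-classification pytorch" --adr.python.name=test --input=src/computer_mouse.jpg
+```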
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md
new file mode 100644
index 0000000000..2d1b951fb6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml
new file mode 100644
index 0000000000..6684bb7370
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml
@@ -0,0 +1,46 @@
+alias: app-image-classification-torch-py
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Modular AI/ML application pipeline
+default_env:
+ CM_BATCH_COUNT: '1'
+ CM_BATCH_SIZE: '1'
+deps:
+- tags: detect,os
+- names:
+ - python
+ - python3
+ tags: get,python3
+- tags: get,dataset,imagenet,image-classification,preprocessed
+- tags: get,dataset-aux,imagenet-aux,image-classification
+- tags: get,imagenet-helper
+- tags: get,ml-model,image-classification,resnet50,_pytorch,_fp32
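+# The next four dependencies form two mutually exclusive pairs: the plain
+# torch/torchvision packages are used unless USE_CUDA is 'yes', in which
+# case the CUDA builds are pulled instead.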
+- skip_if_env:
+ USE_CUDA:
+ - 'yes'
+ tags: get,generic-python-lib,_torch
+- enable_if_env:
+ USE_CUDA:
+ - 'yes'
+ tags: get,generic-python-lib,_torch_cuda
+- skip_if_env:
+ USE_CUDA:
+ - 'yes'
+ tags: get,generic-python-lib,_torchvision
+- enable_if_env:
+ USE_CUDA:
+ - 'yes'
+ tags: get,generic-python-lib,_torchvision_cuda
+tags:
+- app
+- image-classification
+- torch
+- python
+tags_help: app image-classification python torch
+uid: e3986ae887b84ca8
+variations:
+ cuda:
+ deps:
+ - tags: get,cuda
+ env:
+ USE_CUDA: 'yes'
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/img/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/img/computer_mouse.jpg
new file mode 100644
index 0000000000..e7f8abb6fe
Binary files /dev/null and b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/img/computer_mouse.jpg differ
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt
new file mode 100644
index 0000000000..d1c427e4aa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt
@@ -0,0 +1,4 @@
+Pillow
+requests
+numpy
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat
new file mode 100644
index 0000000000..1415d4265b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat
@@ -0,0 +1,20 @@
+rem connect CM portable scripts with CK env
+
+set CM_ML_TORCH_MODEL_NAME=resnet50
+set CM_ML_MODEL_INPUT_DATA_TYPE=float32
+set CM_ML_MODEL_IMAGE_HEIGHT=224
+set CM_ML_MODEL_IMAGE_WIDTH=224
+
+rem set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_PATH%
+
+set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_FULL_PATH%
+set CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt
+set CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32
+set CM_RESULTS_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%\results
+set ML_MODEL_DATA_LAYOUT=NCHW
+
+%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh
new file mode 100644
index 0000000000..b50b79eb40
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+
+# connect CM intelligent components with CK env
+export CM_ML_TORCH_MODEL_NAME=resnet50
+export CM_ML_MODEL_INPUT_DATA_TYPE=float32
+export CM_ML_MODEL_IMAGE_HEIGHT=224
+export CM_ML_MODEL_IMAGE_WIDTH=224
+export CM_DATASET_IMAGENET_PREPROCESSED_DIR=${CM_DATASET_PREPROCESSED_FULL_PATH}
+export CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt
+export CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32
+export CM_RESULTS_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}/results
+export ML_MODEL_DATA_LAYOUT=NCHW
+
+${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+test $? -eq 0 || exit 1
+
+${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
new file mode 100644
index 0000000000..863b3a6513
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+
+import json
+import time
+import os
+import shutil
+import numpy as np
+
+
+import torch
+import torchvision.models as models
+
+import imagenet_helper
+from imagenet_helper import (
+ load_preprocessed_batch,
+ image_list,
+ class_labels,
+ BATCH_SIZE)
+
+# Writing the results out:
+#
+RESULTS_DIR = os.getenv('CM_RESULTS_DIR')
+FULL_REPORT = os.getenv(
+ 'CM_SILENT_MODE',
+ '0') in (
+ 'NO',
+ 'no',
+ 'OFF',
+ 'off',
+ '0')
+
+# Processing by batches:
+#
+BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1))
+
+# Enabling GPU if available and not disabled:
+#
+USE_CUDA = (os.getenv('USE_CUDA', '').strip() == 'yes')
+
+
+labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+
+
+def load_labels(labels_filepath):
+ my_labels = []
+ input_file = open(labels_filepath, 'r')
+ for l in input_file:
+ my_labels.append(l.strip())
+ return my_labels
+
+
+labels = load_labels(labels_path)
+
+
+data_layout = os.environ['ML_MODEL_DATA_LAYOUT']
+
+
+def main():
+ global BATCH_SIZE
+ global BATCH_COUNT
+
+ setup_time_begin = time.time()
+
+ bg_class_offset = 0
+
+ # Cleanup results directory
+ if os.path.isdir(RESULTS_DIR):
+ shutil.rmtree(RESULTS_DIR)
+ os.mkdir(RESULTS_DIR)
+
+ # Load the [cached] Torch model
+ path_to_model_pth = os.environ['CM_ML_MODEL_FILE_WITH_PATH']
+
+ model = models.resnet50(pretrained=False)
+ model.load_state_dict(torch.load(path_to_model_pth))
+
+ model.eval()
+
+ # move the model to GPU for speed if available
+ if USE_CUDA:
+ model.to('cuda')
+
+ setup_time = time.time() - setup_time_begin
+
+ # Run batched mode
+ test_time_begin = time.time()
+ image_index = 0
+ total_load_time = 0
+ total_classification_time = 0
+ first_classification_time = 0
+ images_loaded = 0
+
+ image_path = os.environ.get('CM_INPUT', '')
+ if image_path != '':
+
+ normalize_data_bool = True
+ subtract_mean_bool = False
+ given_channel_means = [] # unused while subtract_mean_bool is False
+
+ from PIL import Image
+
+ def load_and_resize_image(image_filepath, height, width):
+ pillow_img = Image.open(image_filepath).resize(
+ (width, height)) # sic! The order of dimensions in resize is (W,H)
+
+ input_data = np.float32(pillow_img)
+
+ # Normalize
+ if normalize_data_bool:
+ input_data = input_data / 127.5 - 1.0
+
+ # Subtract mean value
+ if subtract_mean_bool:
+ if len(given_channel_means):
+ input_data -= given_channel_means
+ else:
+ input_data -= np.mean(input_data)
+
+ # print(np.array(pillow_img).shape)
+ nhwc_data = np.expand_dims(input_data, axis=0)
+
+ if data_layout == 'NHWC':
+ # print(nhwc_data.shape)
+ return nhwc_data
+ else:
+ nchw_data = nhwc_data.transpose(0, 3, 1, 2)
+ # print(nchw_data.shape)
+ return nchw_data
+
+ BATCH_COUNT = 1
+
+ for batch_index in range(BATCH_COUNT):
+ batch_number = batch_index + 1
+ if FULL_REPORT or (batch_number % 10 == 0):
+ print("\nBatch {} of {}".format(batch_number, BATCH_COUNT))
+
+ begin_time = time.time()
+
+ if image_path == '':
+ batch_data, image_index = load_preprocessed_batch(
+ image_list, image_index)
+ else:
+ batch_data = load_and_resize_image(image_path, 224, 224)
+ image_index = 1
+
+ torch_batch = torch.from_numpy(batch_data)
+
+ load_time = time.time() - begin_time
+ total_load_time += load_time
+ images_loaded += BATCH_SIZE
+ if FULL_REPORT:
+ print("Batch loaded in %fs" % (load_time))
+
+ # Classify one batch
+ begin_time = time.time()
+
+ # move the input to GPU for speed if available
+ if USE_CUDA:
+ torch_batch = torch_batch.to('cuda')
+
+ with torch.no_grad():
+ batch_results = model(torch_batch)
+
+ classification_time = time.time() - begin_time
+ if FULL_REPORT:
+ print("Batch classified in %fs" % (classification_time))
+
+ total_classification_time += classification_time
+ # Remember first batch prediction time
+ if batch_index == 0:
+ first_classification_time = classification_time
+
+ # Process results
+ for index_in_batch in range(BATCH_SIZE):
+ # skipping the background class on the left (if present)
+ softmax_vector = batch_results[index_in_batch][bg_class_offset:]
+ global_index = batch_index * BATCH_SIZE + index_in_batch
+
+ res_file = os.path.join(RESULTS_DIR, image_list[global_index])
+
+ with open(res_file + '.txt', 'w') as f:
+ for prob in softmax_vector:
+ f.write('{}\n'.format(prob))
+
+ top5_indices = list(reversed(softmax_vector.argsort()))[:5]
+ for class_idx in top5_indices:
+ print(
+ "\t{}\t{}\t{}".format(
+ class_idx,
+ softmax_vector[class_idx],
+ labels[class_idx]))
+ print("")
+
+ test_time = time.time() - test_time_begin
+
+ if BATCH_COUNT > 1:
+ avg_classification_time = (
+ total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE)
+ else:
+ avg_classification_time = total_classification_time / images_loaded
+
+ avg_load_time = total_load_time / images_loaded
+
+ # Store benchmarking results:
+ output_dict = {
+ 'setup_time_s': setup_time,
+ 'test_time_s': test_time,
+ 'images_load_time_total_s': total_load_time,
+ 'images_load_time_avg_s': avg_load_time,
+ 'prediction_time_total_s': total_classification_time,
+ 'prediction_time_avg_s': avg_classification_time,
+
+ 'avg_time_ms': avg_classification_time * 1000,
+ 'avg_fps': 1.0 / avg_classification_time,
+ 'batch_time_ms': avg_classification_time * 1000 * BATCH_SIZE,
+ 'batch_size': BATCH_SIZE,
+ }
+ with open('tmp-ck-timer.json', 'w') as out_file:
+ json.dump(output_dict, out_file, indent=4, sort_keys=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md
new file mode 100644
index 0000000000..c24e073a99
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md
@@ -0,0 +1,16 @@
+Example:
+
+```bash
+cm run script "get llvm" --version=14.0.0
+cm run script "get tvm _llvm" --version=0.10.0
+cm run script "python app image-classification tvm-onnx"
+```
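+
+The first two commands install pinned LLVM and TVM versions; the third runs
+the ONNX image classifier through the TVM backend.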
+
+Example 2:
+
+```bash
+cm run script "install python-venv" --name=test --version=3.10.7
+cm run script "get generic-python-lib _apache-tvm"
+cm run script "python app image-classification tvm-onnx _tvm-pip-install"
+cm run script "python app image-classification tvm-onnx _tvm-pip-install" --input=`cm find script --tags=python,app,image-classification,tvm-onnx`/img/computer_mouse.jpg
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md
new file mode 100644
index 0000000000..317018ce02
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml
new file mode 100644
index 0000000000..2b5cc9cca5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml
@@ -0,0 +1,48 @@
+alias: app-image-classification-tvm-onnx-py
+uid: 63080407db4d4ac4
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: Modular AI/ML application pipeline
+
+default_env:
+ CM_BATCH_COUNT: '1'
+ CM_BATCH_SIZE: '1'
+
+deps:
+- tags: detect,os
+- tags: detect,cpu
+- names:
+ - python
+ - python3
+ tags: get,python3
+- tags: get,dataset,image-classification,original
+- tags: get,dataset-aux,image-classification
+- tags: get,raw,ml-model,image-classification,resnet50,_onnx
+- tags: get,generic-python-lib,_onnxruntime
+- names:
+ - tvm
+ tags: get,tvm
+
+tags:
+- app
+- image-classification
+- tvm-onnx
+- python
+
+tags_help: app image-classification python tvm-onnx
+
+variations:
+ cuda:
+ add_deps_recursive:
+ tvm:
+ tags: _cuda
+ deps:
+ - tags: get,cuda
+ env:
+ USE_CUDA: 'yes'
+ llvm:
+ add_deps_recursive:
+ tvm:
+ tags: _llvm
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg
new file mode 100644
index 0000000000..e7f8abb6fe
Binary files /dev/null and b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg differ
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt
new file mode 100644
index 0000000000..ae4aff7eae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt
@@ -0,0 +1,7 @@
+matplotlib
+opencv-python
+scipy
+onnx
+decorator
+attrs
+psutil
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh
new file mode 100644
index 0000000000..8eb0660771
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+
+#if [[ ${CM_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then
+# ${CM_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552
+#fi
+
+export USE_TVM=yes
+
+
+wget -nc https://raw.githubusercontent.com/mlcommons/ck-mlops/main/program/ml-task-image-classification-tvm-onnx-cpu/synset.txt
+test $? -eq 0 || exit 1
+
+${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt
+test $? -eq 0 || exit 1
+
+if [[ "${CM_INPUT}" != "" ]]; then
+ export CM_IMAGE=${CM_INPUT}
+else
+ export CM_IMAGE=${CM_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG
+fi
+
+
+${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${CM_IMAGE}
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py
new file mode 100644
index 0000000000..20c1642889
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py
@@ -0,0 +1,302 @@
+"""
+Developers:
+ - grigori@octoml.ai
+"""
+
+import time
+import os
+import argparse
+import json
+
+from PIL import Image
+import cv2
+
+import numpy as np
+
+import onnxruntime as rt
+
+
+# Image conversion from MLPerf(tm) vision
+def center_crop(img, out_height, out_width):
+ height, width, _ = img.shape
+ left = int((width - out_width) / 2)
+ right = int((width + out_width) / 2)
+ top = int((height - out_height) / 2)
+ bottom = int((height + out_height) / 2)
+ img = img[top:bottom, left:right]
+ return img
+
+
+def resize_with_aspectratio(
+ img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
+ height, width, _ = img.shape
+ new_height = int(100. * out_height / scale)
+ new_width = int(100. * out_width / scale)
+ if height > width:
+ w = new_width
+ h = int(new_height * height / width)
+ else:
+ h = new_height
+ w = int(new_width * width / height)
+ img = cv2.resize(img, (w, h), interpolation=inter_pol)
+ return img
+
+
+# returns list of pairs (prob, class_index)
+def get_top5(all_probs):
+ probs_with_classes = []
+
+ for class_index in range(len(all_probs)):
+ prob = all_probs[class_index]
+ probs_with_classes.append((prob, class_index))
+
+ sorted_probs = sorted(
+ probs_with_classes,
+ key=lambda pair: pair[0],
+ reverse=True)
+ return sorted_probs[0:5]
+
+
+def run_case(dtype, image, target):
+ # Check image
+ import os
+ import json
+ import sys
+
+ STAT_REPEAT = os.environ.get('STAT_REPEAT', '')
+ if STAT_REPEAT == '' or STAT_REPEAT is None:
+ STAT_REPEAT = 10
+ STAT_REPEAT = int(STAT_REPEAT)
+
+ # FGG: set model files via CM env
+ CATEG_FILE = 'synset.txt'
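+ # synset.txt is assumed to hold a Python dict literal mapping class
+ # index -> human-readable label, which is why eval() is used here.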
+ synset = eval(open(os.path.join(CATEG_FILE)).read())
+
+ files = []
+ val = {}
+
+ # FGG: set timers
+ import time
+ timers = {}
+
+ img_orig = cv2.imread(image)
+
+ img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
+
+ output_height, output_width, _ = 224, 224, 3
+ img = resize_with_aspectratio(
+ img,
+ output_height,
+ output_width,
+ inter_pol=cv2.INTER_AREA)
+ img = center_crop(img, output_height, output_width)
+ img = np.asarray(img, dtype='float32')
+
+ # normalize image
+ means = np.array([123.68, 116.78, 103.94], dtype=np.float32)
+ img -= means
+
+ # transpose if needed
+ img = img.transpose([2, 0, 1])
+
+ import matplotlib.pyplot as plt
+ img1 = img.transpose([1, 2, 0])
+ # an axis argument can be passed to squeeze only a specific dimension
+ arr_ = np.squeeze(img1)
+ plt.imshow(arr_)
+# plt.show()
+ plt.savefig('pre-processed-image.png')
+ # Load model
+ model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', '')
+ if model_path == '':
+ print('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined')
+ exit(1)
+
+ opt = rt.SessionOptions()
+
+ if len(rt.get_all_providers()) > 1 and os.environ.get(
+ "USE_CUDA", "yes").lower() not in ["0", "false", "off", "no"]:
+ # Currently considering only CUDAExecutionProvider
+ sess = rt.InferenceSession(
+ model_path, opt, providers=['CUDAExecutionProvider'])
+ else:
+ sess = rt.InferenceSession(
+ model_path, opt, providers=["CPUExecutionProvider"])
+
+ inputs = [meta.name for meta in sess.get_inputs()]
+ outputs = [meta.name for meta in sess.get_outputs()]
+
+ print(inputs)
+ print(outputs)
+
+ if os.environ.get('USE_TVM', '') == 'yes':
+ import tvm
+ from tvm import relay
+ import onnx
+
+ del sess
+
+ # Load model via ONNX to be used with TVM
+ print('')
+ print('ONNX: load model ...')
+ print('')
+
+ onnx_model = onnx.load(model_path)
+
+ # Init TVM
+ # TBD: add tvm platform selector
+ if os.environ.get('USE_CUDA', '') == 'yes':
+ # TVM package must be built with CUDA enabled
+ ctx = tvm.cuda(0)
+ else:
+ ctx = tvm.cpu(0)
+ tvm_ctx = ctx
+
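+        # Choose the TVM compilation target: tune for znver2 on AMD x86_64
+        # hosts, otherwise use a generic 'llvm' target (overridable via TVM_TARGET).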
+ build_conf = {'relay.backend.use_auto_scheduler': False}
+ opt_lvl = int(os.environ.get('TVM_OPT_LEVEL', 3))
+ host = os.environ.get('CM_HOST_PLATFORM_FLAVOR')
+ if host == 'x86_64' and 'AMD' in os.environ.get(
+ 'CM_HOST_CPU_VENDOR_ID', ''):
+ target = os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2')
+ else:
+ target = os.environ.get('TVM_TARGET', 'llvm')
+
+ target_host = None
+ params = {}
+
+ # New target API
+ tvm_target = tvm.target.Target(target, host=target_host)
+
+ input_shape = (1, 3, 224, 224)
+ shape_dict = {inputs[0]: input_shape}
+
+ print('')
+ print('TVM: import model ...')
+ print('')
+ # Extra param: opset=12
+ mod, params = relay.frontend.from_onnx(
+ onnx_model, shape_dict, freeze_params=True)
+
+ print('')
+ print('TVM: transform to static ...')
+ print('')
+ mod = relay.transform.DynamicToStatic()(mod)
+
+ print('')
+ print('TVM: apply extra optimizations ...')
+ print('')
+ # Padding optimization
+ # Adds extra optimizations
+ mod = relay.transform.FoldExplicitPadding()(mod)
+
+ print('')
+ print('TVM: build model ...')
+ print('')
+
+ executor = os.environ.get('MLPERF_TVM_EXECUTOR', 'graph')
+
+ if executor == "graph" or executor == "debug":
+ from tvm.contrib import graph_executor
+
+ # Without history
+ with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf):
+ graph_module = relay.build(mod,
+ target=tvm_target,
+ params=params)
+ lib = graph_module
+
+ print('')
+ print('TVM: init graph engine ...')
+ print('')
+
+ sess = graph_executor.GraphModule(lib['default'](ctx))
+
+ elif executor == "vm":
+ from tvm.runtime.vm import VirtualMachine
+
+ # Without history
+ with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf):
+ vm_exec = relay.vm.compile(
+ mod, target=tvm_target, params=params)
+
+ r_exec = vm_exec
+
+ print('')
+ print('TVM: init VM ...')
+ print('')
+
+ sess = VirtualMachine(r_exec, ctx)
+
+ # For now only graph
+ sess.set_input(inputs[0], tvm.nd.array([img]))
+
+ # Run TVM inference
+ sess.run()
+
+ # Process TVM outputs
+ output = []
+
+ for i in range(sess.get_num_outputs()):
+ # Take only the output of batch size for dynamic batches
+ if len(output) < (i + 1):
+ output.append([])
+ output[i].append(sess.get_output(i).asnumpy()[0])
+
+ else:
+ inp = {inputs[0]: np.array([img], dtype=np.float32)}
+ output = sess.run(outputs, inp)
+
+ top1 = np.argmax(output[1]) - 1 # .asnumpy())
+
+ top5 = []
+ atop5 = get_top5(output[1][0]) # .asnumpy())
+
+ print('')
+ print('Prediction Top1:', top1, synset[top1])
+
+ print('')
+ print('Prediction Top5:')
+ for p in atop5:
+ out = p[1] - 1
+ name = synset[out]
+ print(' * {} {}'.format(out, name))
+
+ ck_results = {
+ 'prediction': synset[top1]
+ }
+
+ with open('tmp-ck-timer.json', 'w') as ck_results_file:
+ json.dump(ck_results, ck_results_file, indent=2, sort_keys=True)
+
+ return
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--image',
+ type=str,
+ help="Path to JPEG image.",
+ default=None,
+ required=True)
+ parser.add_argument('--target', type=str, help="Target", default=None)
+ args = parser.parse_args()
+
+ if args.image.strip().lower() == '':
+ print('Please specify a path to an image via the --image option!')
+ exit(1)
+
+ # set parameter
+ batch_size = 1
+ num_classes = 1000
+ image_shape = (3, 224, 224)
+
+ # load model
+ data_shape = (batch_size,) + image_shape
+ out_shape = (batch_size, num_classes)
+
+ dtype = 'float32'
+ if os.environ.get('CM_TVM_DTYPE', '') != '':
+ dtype = os.environ['CM_TVM_DTYPE']
+
+ run_case(dtype, args.image, args.target)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md
new file mode 100644
index 0000000000..19fe90edb0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md
@@ -0,0 +1,32 @@
+# Examples
+
+First download images:
+
+```bash
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/data.pgm --ssl-verify=no --md5sum=0af279e557a8de252d7ff0751a999379
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --ssl-verify=no --md5sum=45ae5c940233892c2f860efdf0b66e7e
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.jpg --ssl-verify=no --md5sum=e7e2050b41e0b85cedca3ca87ab55390
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.pgm --ssl-verify=no --md5sum=a4e48556d3eb09402bfc98e375b41311
+```
+
+Then run app
+
+```bash
+cm run script "app image corner-detection"
+cm run script "app image corner-detection" -add_deps_recursive.compiler.tags=llvm
+cm run script "app image corner-detection" -add_deps_recursive.compiler.tags=gcc
+cm run script "app image corner-detection" -add_deps_recursive.compiler.tags=llvm --add_deps_recursive.compiler.version_min=11.0.0 --add_deps_recursive.compiler.version_max=13.0.0
+```
+
+## Reproducibility matrix
+
+* Ubuntu 22.04; x64; LLVM 17.0.6
+* Windows 11; x64; LLVM 17.0.6
+
+## Debugging scripts without CM
+
+```bash
+cmr "app image corner-detection" --debug_script_tags=compile,cpp-program
+cmr "app image corner-detection" --debug-script-tags=benchmark,program
+```
+
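+The `--debug_script_tags` flag is meant to drop you into a shell just before
+the matching script step runs, so the generated commands can be inspected and
+rerun manually.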
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md
new file mode 100644
index 0000000000..2697a585cd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-application-pipeline/app-image-corner-detection](https://docs.mlcommons.org/cm4mlops/scripts/Modular-application-pipeline/app-image-corner-detection) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml
new file mode 100644
index 0000000000..1fd27d9b68
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml
@@ -0,0 +1,32 @@
+alias: app-image-corner-detection
+uid: 998ffee0bc534d0a
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: Modular application pipeline
+
+deps:
+- tags: detect,os
+
+- tags: detect,cpu
+
+- tags: download,file,_url.https://cKnowledge.org/ai/data/data.pgm
+ md5sum: 0af279e557a8de252d7ff0751a999379
+ force_cache: false
+
+
+posthook_deps:
+- skip_if_env:
+ CM_SKIP_COMPILE:
+ - 'on'
+ tags: compile,cpp-program
+- skip_if_env:
+ CM_SKIP_RUN:
+ - 'on'
+ tags: benchmark-program
+
+tags:
+- app
+- image
+- corner-detection
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py
new file mode 100644
index 0000000000..962f0de433
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py
@@ -0,0 +1,54 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+ os_info = i['os_info']
+
+ env = i['env']
+ script_path = i['run_script_input']['path']
+
+ env["CM_SOURCE_FOLDER_PATH"] = script_path
+ env['CM_C_SOURCE_FILES'] = "susan.c"
+
+ if 'CM_INPUT' not in env:
+ env['CM_INPUT'] = os.path.join(script_path, 'data.pgm')
+
+ if 'CM_OUTPUT' not in env:
+ env['CM_OUTPUT'] = 'output_image_with_corners.pgm'
+
+ if 'CM_RUN_DIR' not in env:
+ output_path = os.path.join(script_path, "output")
+ if output_path != '' and not os.path.isdir(output_path):
+ os.makedirs(output_path)
+
+ env['CM_RUN_DIR'] = output_path
+
+ env['CM_RUN_SUFFIX'] = env['CM_INPUT'] + ' ' + env['CM_OUTPUT'] + ' -c'
+
+ if os_info['platform'] == 'windows':
+ env['CM_BIN_NAME'] = 'image-corner.exe'
+ else:
+ env['CM_BIN_NAME'] = 'image-corner'
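+        # env keys prefixed with '+' are treated by CM as lists and appended to
+        # rather than overwritten (extra linker flags here)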
+ env['+ LDCFLAGS'] = ["-lm"]
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ env = i['env']
+ print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR'])
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh
new file mode 100644
index 0000000000..30cfbdd00e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+CUR=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+mkdir -p $CUR"/output"
+
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c
new file mode 100644
index 0000000000..8a41d9a22e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c
@@ -0,0 +1,2161 @@
+/* {{{ Copyright etc. */
+
+/**********************************************************************\
+
+ SUSAN Version 2l by Stephen Smith
+ Oxford Centre for Functional Magnetic Resonance Imaging of the Brain,
+ Department of Clinical Neurology, Oxford University, Oxford, UK
+ (Previously in Computer Vision and Image Processing Group - now
+ Computer Vision and Electro Optics Group - DERA Chertsey, UK)
+ Email: steve@fmrib.ox.ac.uk
+ WWW: http://www.fmrib.ox.ac.uk/~steve
+
+ (C) Crown Copyright (1995-1999), Defence Evaluation and Research Agency,
+ Farnborough, Hampshire, GU14 6TD, UK
+ DERA WWW site:
+ http://www.dera.gov.uk/
+ DERA Computer Vision and Electro Optics Group WWW site:
+ http://www.dera.gov.uk/imageprocessing/dera/group_home.html
+ DERA Computer Vision and Electro Optics Group point of contact:
+ Dr. John Savage, jtsavage@dera.gov.uk, +44 1344 633203
+
+ A UK patent has been granted: "Method for digitally processing
+ images to determine the position of edges and/or corners therein for
+ guidance of unmanned vehicle", UK Patent 2272285. Proprietor:
+ Secretary of State for Defence, UK. 15 January 1997
+
+ This code is issued for research purposes only and remains the
+ property of the UK Secretary of State for Defence. This code must
+ not be passed on without this header information being kept
+ intact. This code must not be sold.
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ Readme First */
+
+/**********************************************************************\
+
+ SUSAN Version 2l
+ SUSAN = Smallest Univalue Segment Assimilating Nucleus
+
+ Email: steve@fmrib.ox.ac.uk
+ WWW: http://www.fmrib.ox.ac.uk/~steve
+
+ Related paper:
+ @article{Smith97,
+ author = "Smith, S.M. and Brady, J.M.",
+ title = "{SUSAN} - A New Approach to Low Level Image Processing",
+ journal = "Int. Journal of Computer Vision",
+ pages = "45--78",
+ volume = "23",
+ number = "1",
+ month = "May",
+ year = 1997}
+
+ To be registered for automatic (bug) updates of SUSAN, send an email.
+
+ Compile with:
+ gcc -O4 -o susan susan2l.c -lm
+
+ See following section for different machine information. Please
+ report any bugs (and fixes). There are a few optional changes that
+ can be made in the "defines" section which follows shortly.
+
+ Usage: type "susan" to get usage. Only PGM format files can be input
+ and output. Utilities such as the netpbm package and XV can be used
+ to convert to and from other formats. Any size of image can be
+ processed.
+
+ This code is written using an emacs folding mode, making moving
+ around the different sections very easy. This is why there are
+ various marks within comments and why comments are indented.
+
+
+ SUSAN QUICK:
+
+ This version of the SUSAN corner finder does not do all the
+ false-corner suppression and thus is faster and produces some false
+ positives, particularly on strong edges. However, because there are
+ fewer stages involving thresholds etc., the corners that are
+ correctly reported are usually more stable than those reported with
+ the full algorithm. Thus I recommend at least TRYING this algorithm
+ for applications where stability is important, e.g., tracking.
+
+ THRESHOLDS:
+
+ There are two thresholds which can be set at run-time. These are the
+ brightness threshold (t) and the distance threshold (d).
+
+ SPATIAL CONTROL: d
+
+ In SUSAN smoothing d controls the size of the Gaussian mask; its
+ default is 4.0. Increasing d gives more smoothing. In edge finding,
+ a fixed flat mask is used, either 37 pixels arranged in a "circle"
+ (default), or a 3 by 3 mask which gives finer detail. In corner
+ finding, only the larger 37 pixel mask is used; d is not
+ variable. In smoothing, the flat 3 by 3 mask can be used instead of
+ a larger Gaussian mask; this gives low smoothing and fast operation.
+
+ BRIGHTNESS CONTROL: t
+
+ In all three algorithms, t can be varied (default=20); this is the
+ main threshold to be varied. It determines the maximum difference in
+ greylevels between two pixels which allows them to be considered
+ part of the same "region" in the image. Thus it can be reduced to
+ give more edges or corners, i.e. to be more sensitive, and vice
+ versa. In smoothing, reducing t gives less smoothing, and vice
+ versa. Set t=10 for the test image available from the SUSAN web
+ page.
+
+ ITERATIONS:
+
+ With SUSAN smoothing, more smoothing can also be obtained by
+ iterating the algorithm several times. This has a different effect
+ from varying d or t.
+
+ FIXED MASKS:
+
+ 37 pixel mask: ooo 3 by 3 mask: ooo
+ ooooo ooo
+ ooooooo ooo
+ ooooooo
+ ooooooo
+ ooooo
+ ooo
+
+ CORNER ATTRIBUTES dx, dy and I
+ (Only read this if you are interested in the C implementation or in
+ using corner attributes, e.g., for corner matching)
+
+ Corners reported in the corner list have attributes associated with
+ them as well as positions. This is useful, for example, when
+ attempting to match corners from one image to another, as these
+ attributes can often be fairly unchanged between images. The
+ attributes are dx, dy and I. I is the value of image brightness at
+ the position of the corner. In the case of susan_corners_quick, dx
+ and dy are the first order derivatives (differentials) of the image
+ brightness in the x and y directions respectively, at the position
+ of the corner. In the case of normal susan corner finding, dx and dy
+ are scaled versions of the position of the centre of gravity of the
+ USAN with respect to the centre pixel (nucleus).
+
+ BRIGHTNESS FUNCTION LUT IMPLEMENTATION:
+ (Only read this if you are interested in the C implementation)
+
+ The SUSAN brightness function is implemented as a LUT
+ (Look-Up-Table) for speed. The resulting pointer-based code is a
+ little hard to follow, so here is a brief explanation. In
+ setup_brightness_lut() the LUT is setup. This mallocs enough space
+ for *bp and then repositions the pointer to the centre of the
+ malloced space. The SUSAN function e^-(x^6) or e^-(x^2) is
+ calculated and converted to a uchar in the range 0-100, for all
+ possible image brightness differences (including negative
+ ones). Thus bp[23] is the output for a brightness difference of 23
+ greylevels. In the SUSAN algorithms this LUT is used as follows:
+
+ p=in + (i-3)*x_size + j - 1;
+ p points to the first image pixel in the circular mask surrounding
+ point (x,y).
+
+ cp=bp + in[i*x_size+j];
+ cp points to a position in the LUT corresponding to the brightness
+ of the centre pixel (x,y).
+
+ now for every pixel within the mask surrounding (x,y),
+ n+=*(cp-*p++);
+ the brightness difference function is found by moving the cp pointer
+ down by an amount equal to the value of the pixel pointed to by p,
+ thus subtracting the two brightness values and performing the
+ exponential function. This value is added to n, the running USAN
+ area.
+
+ in SUSAN smoothing, the variable height mask is implemented by
+ multiplying the above by the moving mask pointer, reset for each new
+ centre pixel.
+ tmp = *dpt++ * *(cp-brightness);
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ Machine Information */
+
+/**********************************************************************\
+
+ Success has been reported with the following:
+
+ MACHINE OS COMPILER
+
+ Sun 4.1.4 bundled C, gcc
+
+ Next
+
+ SGI IRIX SGI cc
+
+ DEC Unix V3.2+
+
+ IBM RISC AIX gcc
+
+ PC Borland 5.0
+
+ PC Linux gcc-2.6.3
+
+ PC Win32 Visual C++ 4.0 (Console Application)
+
+ PC Win95 Visual C++ 5.0 (Console Application)
+ Thanks to Niu Yongsheng :
+ Use the FOPENB option below
+
+ PC DOS djgpp gnu C
+ Thanks to Mark Pettovello :
+ Use the FOPENB option below
+
+ HP HP-UX bundled cc
+ Thanks to Brian Dixon :
+ in ksh:
+ export CCOPTS="-Aa -D_HPUX_SOURCE | -lM"
+ cc -O3 -o susan susan2l.c
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ History */
+
+/**********************************************************************\
+
+ SUSAN Version 2l, 12/2/99
+ Changed GNUDOS option to FOPENB.
+ (Thanks to Niu Yongsheng .)
+ Took out redundant "sq=sq/2;".
+
+ SUSAN Version 2k, 19/8/98:
+ In corner finding:
+ Changed if(yy<xx) {...} else {...} to if(yy<xx) {...} if(xx<yy) {...}
+ (Thanks to adq@cim.mcgill.edu - Alain Domercq.)
+
+ SUSAN Version 2j, 22/10/97:
+ Fixed (mask_size>x_size) etc. tests in smoothing.
+ Added a couple of free() calls for cgx and cgy.
+ (Thanks to geoffb@ucs.ed.ac.uk - Geoff Browitt.)
+
+ SUSAN Version 2i, 21/7/97:
+ Added information about corner attributes.
+
+ SUSAN Version 2h, 16/12/96:
+ Added principle (initial enhancement) option.
+
+ SUSAN Version 2g, 2/7/96:
+ Minor superficial changes to code.
+
+ SUSAN Version 2f, 16/1/96:
+ Added GNUDOS option (now called FOPENB; see options below).
+
+ SUSAN Version 2e, 9/1/96:
+ Added -b option.
+ Fixed 1 pixel horizontal offset error for drawing edges.
+
+ SUSAN Version 2d, 27/11/95:
+ Fixed loading of certain PGM files in get_image (again!)
+
+ SUSAN Version 2c, 22/11/95:
+ Fixed loading of certain PGM files in get_image.
+ (Thanks to qu@San-Jose.ate.slb.com - Gongyuan Qu.)
+
+ SUSAN Version 2b, 9/11/95:
+ removed "z==" error in edges routines.
+
+ SUSAN Version 2a, 6/11/95:
+ Removed a few unnecessary variable declarations.
+ Added different machine information.
+ Changed "header" in get_image to char.
+
+ SUSAN Version 2, 1/11/95: first combined version able to take any
+ image sizes.
+
+ SUSAN "Versions 1", circa 1992: the various SUSAN algorithms were
+ developed during my doctorate within different programs and for
+ fixed image sizes. The algorithms themselves are virtually unaltered
+ between "versions 1" and the combined program, version 2.
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ defines, includes and typedefs */
+
+/* ********** Optional settings */
+
+#ifndef PPC
+typedef int TOTAL_TYPE; /* this is faster for "int" but should be "float" for large d masks */
+#else
+typedef float TOTAL_TYPE; /* for my PowerPC accelerator only */
+#endif
+
+/*#define FOPENB*/ /* uncomment if using djgpp gnu C for DOS or certain Win95 compilers */
+#define SEVEN_SUPP /* size for non-max corner suppression; SEVEN_SUPP or FIVE_SUPP */
+#define MAX_CORNERS 15000 /* max corners per frame */
+
+/* ********** Leave the rest - but you may need to remove one or both of sys/file.h and malloc.h lines */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#define exit_error(IFB,IFC) { fprintf(stderr,IFB,IFC); exit(0); }
+#define FTOI(a) ( (a) < 0 ? ((int)(a-0.5)) : ((int)(a+0.5)) )
+typedef unsigned char uchar;
+typedef struct {int x,y,info, dx, dy, I;} CORNER_LIST[MAX_CORNERS];
+
+/* }}} */
+/* {{{ usage() */
+
+#ifdef OPENME
+#include <openme.h>
+#endif
+#ifdef XOPENME
+#include <xopenme.h>
+#endif
+
+void usage(void)
+{
+ printf("Usage: susan [options]\n\n");
+
+ printf("-s : Smoothing mode (default)\n");
+ printf("-e : Edges mode\n");
+ printf("-c : Corners mode\n\n");
+
+ printf("See source code for more information about setting the thresholds\n");
+ printf("-t : Brightness threshold, all modes (default=20)\n");
+ printf("-d : Distance threshold, smoothing mode, (default=4) (use next option instead for flat 3x3 mask)\n");
+ printf("-3 : Use flat 3x3 mask, edges or smoothing mode\n");
+ printf("-n : No post-processing on the binary edge map (runs much faster); edges mode\n");
+ printf("-q : Use faster (and usually stabler) corner mode; edge-like corner suppression not carried out; corners mode\n");
+ printf("-b : Mark corners/edges with single black points instead of black with white border; corners or edges mode\n");
+ printf("-p : Output initial enhancement image only; corners or edges mode (default is edges mode)\n");
+
+ printf("\nSUSAN Version 2l (C) 1995-1997 Stephen Smith, DRA UK. steve@fmrib.ox.ac.uk\n");
+
+ exit(0);
+}
+
+/* }}} */
+/* {{{ get_image(filename,in,x_size,y_size) */
+
+/* {{{ int getint(fp) derived from XV */
+
+int getint(FILE* fd)
+{
+ int c, i;
+ char dummy[10000];
+
+ c = getc(fd);
+ while (1) /* find next integer */
+ {
+ if (c=='#') /* if we're at a comment, read to end of line */
+ fgets(dummy,9000,fd);
+ if (c==EOF)
+ exit_error("Image %s not binary PGM.\n","is");
+ if (c>='0' && c<='9')
+ break; /* found what we were looking for */
+ c = getc(fd);
+ }
+
+ /* we're at the start of a number, continue until we hit a non-number */
+ i = 0;
+ while (1) {
+ i = (i*10) + (c - '0');
+ c = getc(fd);
+ if (c==EOF) return (i);
+ if (c<'0' || c>'9') break;
+ }
+
+ return (i);
+}
+
+/* }}} */
+
+void get_image(char filename[200], unsigned char** in, int* x_size, int* y_size)
+{
+FILE *fd;
+char header [100];
+int tmp;
+
+#ifdef FOPENB
+ if ((fd=fopen(filename,"rb")) == NULL)
+#else
+ if ((fd=fopen(filename,"r")) == NULL)
+#endif
+ exit_error("Can't input image %s.\n",filename);
+
+ /* {{{ read header */
+
+ header[0]=fgetc(fd);
+ header[1]=fgetc(fd);
+ if(!(header[0]=='P' && header[1]=='5'))
+ exit_error("Image %s does not have binary PGM header.\n",filename);
+
+ *x_size = getint(fd);
+ *y_size = getint(fd);
+ tmp = getint(fd);
+
+/* }}} */
+
+ *in = (uchar *) malloc(*x_size * *y_size);
+
+ if (fread(*in,1,*x_size * *y_size,fd) == 0)
+ exit_error("Image %s is wrong size.\n",filename);
+
+ fclose(fd);
+}
+
+/* }}} */
+/* {{{ put_image(filename,in,x_size,y_size) */
+
+void put_image(char filename[100], char* in, int x_size, int y_size)
+{
+FILE *fd;
+
+#ifdef FOPENB
+ if ((fd=fopen(filename,"wb")) == NULL)
+#else
+ if ((fd=fopen(filename,"w")) == NULL)
+#endif
+ exit_error("Can't output image%s.\n",filename);
+
+ fprintf(fd,"P5\n");
+ fprintf(fd,"%d %d\n",x_size,y_size);
+ fprintf(fd,"255\n");
+
+ if (fwrite(in,x_size*y_size,1,fd) != 1)
+ exit_error("Can't write image %s.\n",filename);
+
+ fclose(fd);
+}
+
+/* }}} */
+/* {{{ int_to_uchar(r,in,size) */
+
+void int_to_uchar(int* r, uchar* in, int size)
+{
+int i,
+ max_r=r[0],
+ min_r=r[0];
+
+  for (i=0; i<size; i++)
+    {
+      if ( r[i] > max_r )
+ max_r=r[i];
+ if ( r[i] < min_r )
+ min_r=r[i];
+ }
+
+ /*printf("min=%d max=%d\n",min_r,max_r);*/
+
+ max_r-=min_r;
+
+ for (i=0; ip[l+1])
+ {
+ tmp=p[l]; p[l]=p[l+1]; p[l+1]=tmp;
+ }
+
+ return( (p[3]+p[4]) / 2 );
+}
+
+/* }}} */
+/* {{{ enlarge(in,tmp_image,x_size,y_size,border) */
+
+/* this enlarges "in" so that borders can be dealt with easily */
+
+void enlarge(uchar** in, uchar* tmp_image, int* x_size, int* y_size, int border)
+{
+int i, j;
+
+ for(i=0; i<*y_size; i++) /* copy *in into tmp_image */
+ memcpy(tmp_image+(i+border)*(*x_size+2*border)+border, *in+i* *x_size, *x_size);
+
+ for(i=0; i15) && (total==0) )
+ {
+ printf("Distance_thresh (%f) too big for integer arithmetic.\n",dt);
+ printf("Either reduce it to <=15 or recompile with variable \"total\"\n");
+ printf("as a float: see top \"defines\" section.\n");
+ exit(0);
+ }
+
+ if ( (2*mask_size+1>x_size) || (2*mask_size+1>y_size) )
+ {
+ printf("Mask size (1.5*distance_thresh+1=%d) too big for image (%dx%d).\n",mask_size,x_size,y_size);
+ exit(0);
+ }
+
+ tmp_image = (uchar *) malloc( (x_size+mask_size*2) * (y_size+mask_size*2) );
+ enlarge(&in,tmp_image,&x_size,&y_size,mask_size);
+
+/* }}} */
+
+ if (three_by_three==0)
+ { /* large Gaussian masks */
+ /* {{{ setup distance lut */
+
+ n_max = (mask_size*2) + 1;
+
+ increment = x_size - n_max;
+
+ dp = (unsigned char *)malloc(n_max*n_max);
+ dpt = dp;
+ temp = -(dt*dt);
+
+ for(i=-mask_size; i<=mask_size; i++)
+ for(j=-mask_size; j<=mask_size; j++)
+ {
+ x = (int) (100.0 * exp( ((float)((i*i)+(j*j))) / temp ));
+ *dpt++ = (unsigned char)x;
+ }
+
+/* }}} */
+ /* {{{ main section */
+
+ for (i=mask_size;im) { m=l[y+y+y+x]; a=y; b=x; }
+
+ if (m>0)
+ {
+ if (mid[i*x_size+j]<4)
+ mid[(i+a-1)*x_size+j+b-1] = 4;
+ else
+ mid[(i+a-1)*x_size+j+b-1] = mid[i*x_size+j]+1;
+ if ( (a+a+b) < 3 ) /* need to jump back in image */
+ {
+ i+=a-1;
+ j+=b-2;
+ if (i<4) i=4;
+ if (j<4) j=4;
+ }
+ }
+ }
+
+/* }}} */
+ /* {{{ n==2 */
+
+ if (n==2)
+ {
+ /* put in a bit here to straighten edges */
+ b00 = mid[(i-1)*x_size+j-1]<8; /* corners of 3x3 */
+ b02 = mid[(i-1)*x_size+j+1]<8;
+ b20 = mid[(i+1)*x_size+j-1]<8;
+ b22 = mid[(i+1)*x_size+j+1]<8;
+ if ( ((b00+b02+b20+b22)==2) && ((b00|b22)&(b02|b20)))
+ { /* case: move a point back into line.
+ e.g. X O X CAN become X X X
+ O X O O O O
+ O O O O O O */
+ if (b00)
+ {
+ if (b02) { x=0; y=-1; }
+ else { x=-1; y=0; }
+ }
+ else
+ {
+ if (b02) { x=1; y=0; }
+ else { x=0; y=1; }
+ }
+ if (((float)r[(i+y)*x_size+j+x]/(float)centre) > 0.7)
+ {
+ if ( ( (x==0) && (mid[(i+(2*y))*x_size+j]>7) && (mid[(i+(2*y))*x_size+j-1]>7) && (mid[(i+(2*y))*x_size+j+1]>7) ) ||
+ ( (y==0) && (mid[(i)*x_size+j+(2*x)]>7) && (mid[(i+1)*x_size+j+(2*x)]>7) && (mid[(i-1)*x_size+j+(2*x)]>7) ) )
+ {
+ mid[(i)*x_size+j]=100;
+ mid[(i+y)*x_size+j+x]=3; /* no jumping needed */
+ }
+ }
+ }
+ else
+ {
+ b01 = mid[(i-1)*x_size+j ]<8;
+ b12 = mid[(i )*x_size+j+1]<8;
+ b21 = mid[(i+1)*x_size+j ]<8;
+ b10 = mid[(i )*x_size+j-1]<8;
+ /* {{{ right angle ends - not currently used */
+
+#ifdef IGNORETHIS
+ if ( (b00&b01)|(b00&b10)|(b02&b01)|(b02&b12)|(b20&b10)|(b20&b21)|(b22&b21)|(b22&b12) )
+ { /* case; right angle ends. clean up.
+ e.g.; X X O CAN become X X O
+ O X O O O O
+ O O O O O O */
+ if ( ((b01)&(mid[(i-2)*x_size+j-1]>7)&(mid[(i-2)*x_size+j]>7)&(mid[(i-2)*x_size+j+1]>7)&
+ ((b00&((2*r[(i-1)*x_size+j+1])>centre))|(b02&((2*r[(i-1)*x_size+j-1])>centre)))) |
+ ((b10)&(mid[(i-1)*x_size+j-2]>7)&(mid[(i)*x_size+j-2]>7)&(mid[(i+1)*x_size+j-2]>7)&
+ ((b00&((2*r[(i+1)*x_size+j-1])>centre))|(b20&((2*r[(i-1)*x_size+j-1])>centre)))) |
+ ((b12)&(mid[(i-1)*x_size+j+2]>7)&(mid[(i)*x_size+j+2]>7)&(mid[(i+1)*x_size+j+2]>7)&
+ ((b02&((2*r[(i+1)*x_size+j+1])>centre))|(b22&((2*r[(i-1)*x_size+j+1])>centre)))) |
+ ((b21)&(mid[(i+2)*x_size+j-1]>7)&(mid[(i+2)*x_size+j]>7)&(mid[(i+2)*x_size+j+1]>7)&
+ ((b20&((2*r[(i+1)*x_size+j+1])>centre))|(b22&((2*r[(i+1)*x_size+j-1])>centre)))) )
+ {
+ mid[(i)*x_size+j]=100;
+ if (b10&b20) j-=2;
+ if (b00|b01|b02) { i--; j-=2; }
+ }
+ }
+#endif
+
+/* }}} */
+ if ( ((b01+b12+b21+b10)==2) && ((b10|b12)&(b01|b21)) &&
+ ((b01&((mid[(i-2)*x_size+j-1]<8)|(mid[(i-2)*x_size+j+1]<8)))|(b10&((mid[(i-1)*x_size+j-2]<8)|(mid[(i+1)*x_size+j-2]<8)))|
+ (b12&((mid[(i-1)*x_size+j+2]<8)|(mid[(i+1)*x_size+j+2]<8)))|(b21&((mid[(i+2)*x_size+j-1]<8)|(mid[(i+2)*x_size+j+1]<8)))) )
+ { /* case; clears odd right angles.
+ e.g.; O O O becomes O O O
+ X X O X O O
+ O X O O X O */
+ mid[(i)*x_size+j]=100;
+ i--; /* jump back */
+ j-=2;
+ if (i<4) i=4;
+ if (j<4) j=4;
+ }
+ }
+ }
+
+/* }}} */
+ /* {{{ n>2 the thinning is done here without breaking connectivity */
+
+ if (n>2)
+ {
+ b01 = mid[(i-1)*x_size+j ]<8;
+ b12 = mid[(i )*x_size+j+1]<8;
+ b21 = mid[(i+1)*x_size+j ]<8;
+ b10 = mid[(i )*x_size+j-1]<8;
+ if((b01+b12+b21+b10)>1)
+ {
+ b00 = mid[(i-1)*x_size+j-1]<8;
+ b02 = mid[(i-1)*x_size+j+1]<8;
+ b20 = mid[(i+1)*x_size+j-1]<8;
+ b22 = mid[(i+1)*x_size+j+1]<8;
+ p1 = b00 | b01;
+ p2 = b02 | b12;
+ p3 = b22 | b21;
+ p4 = b20 | b10;
+
+ if( ((p1 + p2 + p3 + p4) - ((b01 & p2)+(b12 & p3)+(b21 & p4)+(b10 & p1))) < 2)
+ {
+ mid[(i)*x_size+j]=100;
+ i--;
+ j-=2;
+ if (i<4) i=4;
+ if (j<4) j=4;
+ }
+ }
+ }
+
+/* }}} */
+ }
+}
+
+/* }}} */
+/* {{{ susan_edges(in,r,sf,max_no,out) */
+
+void susan_edges(uchar* in, int* r, uchar* mid, uchar* bp,
+ int max_no, int x_size, int y_size)
+{
+float z;
+int do_symmetry, i, j, m, n, a, b, x, y, w;
+uchar c,*p,*cp;
+
+ memset (r,0,x_size * y_size * sizeof(int));
+
+ for (i=3;i0)
+ {
+ m=r[i*x_size+j];
+ n=max_no - m;
+ cp=bp + in[i*x_size+j];
+
+ if (n>600)
+ {
+ p=in + (i-3)*x_size + j - 1;
+ x=0;y=0;
+
+ c=*(cp-*p++);x-=c;y-=3*c;
+ c=*(cp-*p++);y-=3*c;
+ c=*(cp-*p);x+=c;y-=3*c;
+ p+=x_size-3;
+
+ c=*(cp-*p++);x-=2*c;y-=2*c;
+ c=*(cp-*p++);x-=c;y-=2*c;
+ c=*(cp-*p++);y-=2*c;
+ c=*(cp-*p++);x+=c;y-=2*c;
+ c=*(cp-*p);x+=2*c;y-=2*c;
+ p+=x_size-5;
+
+ c=*(cp-*p++);x-=3*c;y-=c;
+ c=*(cp-*p++);x-=2*c;y-=c;
+ c=*(cp-*p++);x-=c;y-=c;
+ c=*(cp-*p++);y-=c;
+ c=*(cp-*p++);x+=c;y-=c;
+ c=*(cp-*p++);x+=2*c;y-=c;
+ c=*(cp-*p);x+=3*c;y-=c;
+ p+=x_size-6;
+
+ c=*(cp-*p++);x-=3*c;
+ c=*(cp-*p++);x-=2*c;
+ c=*(cp-*p);x-=c;
+ p+=2;
+ c=*(cp-*p++);x+=c;
+ c=*(cp-*p++);x+=2*c;
+ c=*(cp-*p);x+=3*c;
+ p+=x_size-6;
+
+ c=*(cp-*p++);x-=3*c;y+=c;
+ c=*(cp-*p++);x-=2*c;y+=c;
+ c=*(cp-*p++);x-=c;y+=c;
+ c=*(cp-*p++);y+=c;
+ c=*(cp-*p++);x+=c;y+=c;
+ c=*(cp-*p++);x+=2*c;y+=c;
+ c=*(cp-*p);x+=3*c;y+=c;
+ p+=x_size-5;
+
+ c=*(cp-*p++);x-=2*c;y+=2*c;
+ c=*(cp-*p++);x-=c;y+=2*c;
+ c=*(cp-*p++);y+=2*c;
+ c=*(cp-*p++);x+=c;y+=2*c;
+ c=*(cp-*p);x+=2*c;y+=2*c;
+ p+=x_size-3;
+
+ c=*(cp-*p++);x-=c;y+=3*c;
+ c=*(cp-*p++);y+=3*c;
+ c=*(cp-*p);x+=c;y+=3*c;
+
+ z = sqrt((float)((x*x) + (y*y)));
+ if (z > (0.9*(float)n)) /* 0.5 */
+ {
+ do_symmetry=0;
+ if (x==0)
+ z=1000000.0;
+ else
+ z=((float)y) / ((float)x);
+ if (z < 0) { z=-z; w=-1; }
+ else w=1;
+ if (z < 0.5) { /* vert_edge */ a=0; b=1; }
+ else { if (z > 2.0) { /* hor_edge */ a=1; b=0; }
+ else { /* diag_edge */ if (w>0) { a=1; b=1; }
+ else { a=-1; b=1; }}}
+ if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) &&
+ (m > r[(i+(2*a))*x_size+j+(2*b)]) && (m >= r[(i-(2*a))*x_size+j-(2*b)]) )
+ mid[i*x_size+j] = 1;
+ }
+ else
+ do_symmetry=1;
+ }
+ else
+ do_symmetry=1;
+
+ if (do_symmetry==1)
+ {
+ p=in + (i-3)*x_size + j - 1;
+ x=0; y=0; w=0;
+
+ /* | \
+ y -x- w
+ | \ */
+
+ c=*(cp-*p++);x+=c;y+=9*c;w+=3*c;
+ c=*(cp-*p++);y+=9*c;
+ c=*(cp-*p);x+=c;y+=9*c;w-=3*c;
+ p+=x_size-3;
+
+ c=*(cp-*p++);x+=4*c;y+=4*c;w+=4*c;
+ c=*(cp-*p++);x+=c;y+=4*c;w+=2*c;
+ c=*(cp-*p++);y+=4*c;
+ c=*(cp-*p++);x+=c;y+=4*c;w-=2*c;
+ c=*(cp-*p);x+=4*c;y+=4*c;w-=4*c;
+ p+=x_size-5;
+
+ c=*(cp-*p++);x+=9*c;y+=c;w+=3*c;
+ c=*(cp-*p++);x+=4*c;y+=c;w+=2*c;
+ c=*(cp-*p++);x+=c;y+=c;w+=c;
+ c=*(cp-*p++);y+=c;
+ c=*(cp-*p++);x+=c;y+=c;w-=c;
+ c=*(cp-*p++);x+=4*c;y+=c;w-=2*c;
+ c=*(cp-*p);x+=9*c;y+=c;w-=3*c;
+ p+=x_size-6;
+
+ c=*(cp-*p++);x+=9*c;
+ c=*(cp-*p++);x+=4*c;
+ c=*(cp-*p);x+=c;
+ p+=2;
+ c=*(cp-*p++);x+=c;
+ c=*(cp-*p++);x+=4*c;
+ c=*(cp-*p);x+=9*c;
+ p+=x_size-6;
+
+ c=*(cp-*p++);x+=9*c;y+=c;w-=3*c;
+ c=*(cp-*p++);x+=4*c;y+=c;w-=2*c;
+ c=*(cp-*p++);x+=c;y+=c;w-=c;
+ c=*(cp-*p++);y+=c;
+ c=*(cp-*p++);x+=c;y+=c;w+=c;
+ c=*(cp-*p++);x+=4*c;y+=c;w+=2*c;
+ c=*(cp-*p);x+=9*c;y+=c;w+=3*c;
+ p+=x_size-5;
+
+ c=*(cp-*p++);x+=4*c;y+=4*c;w-=4*c;
+ c=*(cp-*p++);x+=c;y+=4*c;w-=2*c;
+ c=*(cp-*p++);y+=4*c;
+ c=*(cp-*p++);x+=c;y+=4*c;w+=2*c;
+ c=*(cp-*p);x+=4*c;y+=4*c;w+=4*c;
+ p+=x_size-3;
+
+ c=*(cp-*p++);x+=c;y+=9*c;w-=3*c;
+ c=*(cp-*p++);y+=9*c;
+ c=*(cp-*p);x+=c;y+=9*c;w+=3*c;
+
+ if (y==0)
+ z = 1000000.0;
+ else
+ z = ((float)x) / ((float)y);
+ if (z < 0.5) { /* vertical */ a=0; b=1; }
+ else { if (z > 2.0) { /* horizontal */ a=1; b=0; }
+ else { /* diagonal */ if (w>0) { a=-1; b=1; }
+ else { a=1; b=1; }}}
+ if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) &&
+ (m > r[(i+(2*a))*x_size+j+(2*b)]) && (m >= r[(i-(2*a))*x_size+j-(2*b)]) )
+ mid[i*x_size+j] = 2;
+ }
+ }
+ }
+}
+
+/* }}} */
+/* {{{ susan_edges_small(in,r,sf,max_no,out) */
+
+void susan_edges_small(uchar* in, int* r, uchar* mid, uchar* bp,
+ int max_no, int x_size, int y_size)
+{
+float z;
+int do_symmetry, i, j, m, n, a, b, x, y, w;
+uchar c,*p,*cp;
+
+ memset (r,0,x_size * y_size * sizeof(int));
+
+ max_no = 730; /* ho hum ;) */
+
+ for (i=1;i0)
+ {
+ m=r[i*x_size+j];
+ n=max_no - m;
+ cp=bp + in[i*x_size+j];
+
+ if (n>250)
+ {
+ p=in + (i-1)*x_size + j - 1;
+ x=0;y=0;
+
+ c=*(cp-*p++);x-=c;y-=c;
+ c=*(cp-*p++);y-=c;
+ c=*(cp-*p);x+=c;y-=c;
+ p+=x_size-2;
+
+ c=*(cp-*p);x-=c;
+ p+=2;
+ c=*(cp-*p);x+=c;
+ p+=x_size-2;
+
+ c=*(cp-*p++);x-=c;y+=c;
+ c=*(cp-*p++);y+=c;
+ c=*(cp-*p);x+=c;y+=c;
+
+ z = sqrt((float)((x*x) + (y*y)));
+ if (z > (0.4*(float)n)) /* 0.6 */
+ {
+ do_symmetry=0;
+ if (x==0)
+ z=1000000.0;
+ else
+ z=((float)y) / ((float)x);
+ if (z < 0) { z=-z; w=-1; }
+ else w=1;
+ if (z < 0.5) { /* vert_edge */ a=0; b=1; }
+ else { if (z > 2.0) { /* hor_edge */ a=1; b=0; }
+ else { /* diag_edge */ if (w>0) { a=1; b=1; }
+ else { a=-1; b=1; }}}
+ if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) )
+ mid[i*x_size+j] = 1;
+ }
+ else
+ do_symmetry=1;
+ }
+ else
+ do_symmetry=1;
+
+ if (do_symmetry==1)
+ {
+ p=in + (i-1)*x_size + j - 1;
+ x=0; y=0; w=0;
+
+ /* | \
+ y -x- w
+ | \ */
+
+ c=*(cp-*p++);x+=c;y+=c;w+=c;
+ c=*(cp-*p++);y+=c;
+ c=*(cp-*p);x+=c;y+=c;w-=c;
+ p+=x_size-2;
+
+ c=*(cp-*p);x+=c;
+ p+=2;
+ c=*(cp-*p);x+=c;
+ p+=x_size-2;
+
+ c=*(cp-*p++);x+=c;y+=c;w-=c;
+ c=*(cp-*p++);y+=c;
+ c=*(cp-*p);x+=c;y+=c;w+=c;
+
+ if (y==0)
+ z = 1000000.0;
+ else
+ z = ((float)x) / ((float)y);
+ if (z < 0.5) { /* vertical */ a=0; b=1; }
+ else { if (z > 2.0) { /* horizontal */ a=1; b=0; }
+ else { /* diagonal */ if (w>0) { a=-1; b=1; }
+ else { a=1; b=1; }}}
+ if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) )
+ mid[i*x_size+j] = 2;
+ }
+ }
+ }
+}
+
+/* }}} */
+
+/* }}} */
+/* {{{ corners */
+
+/* {{{ corner_draw(in,corner_list,drawing_mode) */
+
+void corner_draw(uchar* in, CORNER_LIST corner_list,
+ int x_size, int drawing_mode)
+{
+uchar *p;
+int n=0;
+
+ while(corner_list[n].info != 7)
+ {
+ if (drawing_mode==0)
+ {
+ p = in + (corner_list[n].y-1)*x_size + corner_list[n].x - 1;
+ *p++=255; *p++=255; *p=255; p+=x_size-2;
+ *p++=255; *p++=0; *p=255; p+=x_size-2;
+ *p++=255; *p++=255; *p=255;
+ n++;
+ }
+ else
+ {
+ p = in + corner_list[n].y*x_size + corner_list[n].x;
+ *p=0;
+ n++;
+ }
+ }
+}
+
+/* }}} */
+/* {{{ susan(in,r,sf,max_no,corner_list) */
+
+void susan_corners(uchar* in, int* r, uchar* bp,
+ int max_no, CORNER_LIST corner_list,
+ int x_size, int y_size)
+{
+int n,x,y,sq,xx,yy,
+ i,j,*cgx,*cgy;
+float divide;
+uchar c,*p,*cp;
+
+ memset (r,0,x_size * y_size * sizeof(int));
+
+ cgx=(int *)malloc(x_size*y_size*sizeof(int));
+ cgy=(int *)malloc(x_size*y_size*sizeof(int));
+
+ for (i=5;i ((n*n)/2) )
+ {
+ if(yy290){
+ r[i*x_size+j] = max_no-n;
+ cgx[i*x_size+j] = (51*x)/n;
+ cgy[i*x_size+j] = (51*y)/n;}
+ }
+ }
+}}}}}}}}}}}}}}}}}}}
+
+ /* to locate the local maxima */
+ n=0;
+  for (i=5;i<y_size-5;i++)
+    for (j=5;j<x_size-5;j++) {
+       x = r[i*x_size+j];
+       if (x>0)  {
+ /* 5x5 mask */
+#ifdef FIVE_SUPP
+ if (
+ (x>r[(i-1)*x_size+j+2]) &&
+ (x>r[(i )*x_size+j+1]) &&
+ (x>r[(i )*x_size+j+2]) &&
+ (x>r[(i+1)*x_size+j-1]) &&
+ (x>r[(i+1)*x_size+j ]) &&
+ (x>r[(i+1)*x_size+j+1]) &&
+ (x>r[(i+1)*x_size+j+2]) &&
+ (x>r[(i+2)*x_size+j-2]) &&
+ (x>r[(i+2)*x_size+j-1]) &&
+ (x>r[(i+2)*x_size+j ]) &&
+ (x>r[(i+2)*x_size+j+1]) &&
+ (x>r[(i+2)*x_size+j+2]) &&
+ (x>=r[(i-2)*x_size+j-2]) &&
+ (x>=r[(i-2)*x_size+j-1]) &&
+ (x>=r[(i-2)*x_size+j ]) &&
+ (x>=r[(i-2)*x_size+j+1]) &&
+ (x>=r[(i-2)*x_size+j+2]) &&
+ (x>=r[(i-1)*x_size+j-2]) &&
+ (x>=r[(i-1)*x_size+j-1]) &&
+ (x>=r[(i-1)*x_size+j ]) &&
+ (x>=r[(i-1)*x_size+j+1]) &&
+ (x>=r[(i )*x_size+j-2]) &&
+ (x>=r[(i )*x_size+j-1]) &&
+ (x>=r[(i+1)*x_size+j-2]) )
+#endif
+#ifdef SEVEN_SUPP
+ if (
+ (x>r[(i-3)*x_size+j-3]) &&
+ (x>r[(i-3)*x_size+j-2]) &&
+ (x>r[(i-3)*x_size+j-1]) &&
+ (x>r[(i-3)*x_size+j ]) &&
+ (x>r[(i-3)*x_size+j+1]) &&
+ (x>r[(i-3)*x_size+j+2]) &&
+ (x>r[(i-3)*x_size+j+3]) &&
+
+ (x>r[(i-2)*x_size+j-3]) &&
+ (x>r[(i-2)*x_size+j-2]) &&
+ (x>r[(i-2)*x_size+j-1]) &&
+ (x>r[(i-2)*x_size+j ]) &&
+ (x>r[(i-2)*x_size+j+1]) &&
+ (x>r[(i-2)*x_size+j+2]) &&
+ (x>r[(i-2)*x_size+j+3]) &&
+
+ (x>r[(i-1)*x_size+j-3]) &&
+ (x>r[(i-1)*x_size+j-2]) &&
+ (x>r[(i-1)*x_size+j-1]) &&
+ (x>r[(i-1)*x_size+j ]) &&
+ (x>r[(i-1)*x_size+j+1]) &&
+ (x>r[(i-1)*x_size+j+2]) &&
+ (x>r[(i-1)*x_size+j+3]) &&
+
+ (x>r[(i)*x_size+j-3]) &&
+ (x>r[(i)*x_size+j-2]) &&
+ (x>r[(i)*x_size+j-1]) &&
+ (x>=r[(i)*x_size+j+1]) &&
+ (x>=r[(i)*x_size+j+2]) &&
+ (x>=r[(i)*x_size+j+3]) &&
+
+ (x>=r[(i+1)*x_size+j-3]) &&
+ (x>=r[(i+1)*x_size+j-2]) &&
+ (x>=r[(i+1)*x_size+j-1]) &&
+ (x>=r[(i+1)*x_size+j ]) &&
+ (x>=r[(i+1)*x_size+j+1]) &&
+ (x>=r[(i+1)*x_size+j+2]) &&
+ (x>=r[(i+1)*x_size+j+3]) &&
+
+ (x>=r[(i+2)*x_size+j-3]) &&
+ (x>=r[(i+2)*x_size+j-2]) &&
+ (x>=r[(i+2)*x_size+j-1]) &&
+ (x>=r[(i+2)*x_size+j ]) &&
+ (x>=r[(i+2)*x_size+j+1]) &&
+ (x>=r[(i+2)*x_size+j+2]) &&
+ (x>=r[(i+2)*x_size+j+3]) &&
+
+ (x>=r[(i+3)*x_size+j-3]) &&
+ (x>=r[(i+3)*x_size+j-2]) &&
+ (x>=r[(i+3)*x_size+j-1]) &&
+ (x>=r[(i+3)*x_size+j ]) &&
+ (x>=r[(i+3)*x_size+j+1]) &&
+ (x>=r[(i+3)*x_size+j+2]) &&
+ (x>=r[(i+3)*x_size+j+3]) )
+#endif
+{
+corner_list[n].info=0;
+corner_list[n].x=j;
+corner_list[n].y=i;
+corner_list[n].dx=cgx[i*x_size+j];
+corner_list[n].dy=cgy[i*x_size+j];
+corner_list[n].I=in[i*x_size+j];
+n++;
+if(n==MAX_CORNERS){
+ fprintf(stderr,"Too many corners.\n");
+ exit(1);
+ }}}}
+corner_list[n].info=7;
+
+free(cgx);
+free(cgy);
+
+}
+
+/* }}} */
+/* {{{ susan_quick(in,r,sf,max_no,corner_list) */
+
+void susan_corners_quick(uchar* in, int* r, uchar* bp,
+ int max_no, CORNER_LIST corner_list,
+ int x_size, int y_size)
+{
+int n,x,y,i,j;
+uchar *p,*cp;
+
+ memset (r,0,x_size * y_size * sizeof(int));
+
+ for (i=7;i0) {
+ /* 5x5 mask */
+#ifdef FIVE_SUPP
+ if (
+ (x>r[(i-1)*x_size+j+2]) &&
+ (x>r[(i )*x_size+j+1]) &&
+ (x>r[(i )*x_size+j+2]) &&
+ (x>r[(i+1)*x_size+j-1]) &&
+ (x>r[(i+1)*x_size+j ]) &&
+ (x>r[(i+1)*x_size+j+1]) &&
+ (x>r[(i+1)*x_size+j+2]) &&
+ (x>r[(i+2)*x_size+j-2]) &&
+ (x>r[(i+2)*x_size+j-1]) &&
+ (x>r[(i+2)*x_size+j ]) &&
+ (x>r[(i+2)*x_size+j+1]) &&
+ (x>r[(i+2)*x_size+j+2]) &&
+ (x>=r[(i-2)*x_size+j-2]) &&
+ (x>=r[(i-2)*x_size+j-1]) &&
+ (x>=r[(i-2)*x_size+j ]) &&
+ (x>=r[(i-2)*x_size+j+1]) &&
+ (x>=r[(i-2)*x_size+j+2]) &&
+ (x>=r[(i-1)*x_size+j-2]) &&
+ (x>=r[(i-1)*x_size+j-1]) &&
+ (x>=r[(i-1)*x_size+j ]) &&
+ (x>=r[(i-1)*x_size+j+1]) &&
+ (x>=r[(i )*x_size+j-2]) &&
+ (x>=r[(i )*x_size+j-1]) &&
+ (x>=r[(i+1)*x_size+j-2]) )
+#endif
+#ifdef SEVEN_SUPP
+ if (
+ (x>r[(i-3)*x_size+j-3]) &&
+ (x>r[(i-3)*x_size+j-2]) &&
+ (x>r[(i-3)*x_size+j-1]) &&
+ (x>r[(i-3)*x_size+j ]) &&
+ (x>r[(i-3)*x_size+j+1]) &&
+ (x>r[(i-3)*x_size+j+2]) &&
+ (x>r[(i-3)*x_size+j+3]) &&
+
+ (x>r[(i-2)*x_size+j-3]) &&
+ (x>r[(i-2)*x_size+j-2]) &&
+ (x>r[(i-2)*x_size+j-1]) &&
+ (x>r[(i-2)*x_size+j ]) &&
+ (x>r[(i-2)*x_size+j+1]) &&
+ (x>r[(i-2)*x_size+j+2]) &&
+ (x>r[(i-2)*x_size+j+3]) &&
+
+ (x>r[(i-1)*x_size+j-3]) &&
+ (x>r[(i-1)*x_size+j-2]) &&
+ (x>r[(i-1)*x_size+j-1]) &&
+ (x>r[(i-1)*x_size+j ]) &&
+ (x>r[(i-1)*x_size+j+1]) &&
+ (x>r[(i-1)*x_size+j+2]) &&
+ (x>r[(i-1)*x_size+j+3]) &&
+
+ (x>r[(i)*x_size+j-3]) &&
+ (x>r[(i)*x_size+j-2]) &&
+ (x>r[(i)*x_size+j-1]) &&
+ (x>=r[(i)*x_size+j+1]) &&
+ (x>=r[(i)*x_size+j+2]) &&
+ (x>=r[(i)*x_size+j+3]) &&
+
+ (x>=r[(i+1)*x_size+j-3]) &&
+ (x>=r[(i+1)*x_size+j-2]) &&
+ (x>=r[(i+1)*x_size+j-1]) &&
+ (x>=r[(i+1)*x_size+j ]) &&
+ (x>=r[(i+1)*x_size+j+1]) &&
+ (x>=r[(i+1)*x_size+j+2]) &&
+ (x>=r[(i+1)*x_size+j+3]) &&
+
+ (x>=r[(i+2)*x_size+j-3]) &&
+ (x>=r[(i+2)*x_size+j-2]) &&
+ (x>=r[(i+2)*x_size+j-1]) &&
+ (x>=r[(i+2)*x_size+j ]) &&
+ (x>=r[(i+2)*x_size+j+1]) &&
+ (x>=r[(i+2)*x_size+j+2]) &&
+ (x>=r[(i+2)*x_size+j+3]) &&
+
+ (x>=r[(i+3)*x_size+j-3]) &&
+ (x>=r[(i+3)*x_size+j-2]) &&
+ (x>=r[(i+3)*x_size+j-1]) &&
+ (x>=r[(i+3)*x_size+j ]) &&
+ (x>=r[(i+3)*x_size+j+1]) &&
+ (x>=r[(i+3)*x_size+j+2]) &&
+ (x>=r[(i+3)*x_size+j+3]) )
+#endif
+{
+corner_list[n].info=0;
+corner_list[n].x=j;
+corner_list[n].y=i;
+x = in[(i-2)*x_size+j-2] + in[(i-2)*x_size+j-1] + in[(i-2)*x_size+j] + in[(i-2)*x_size+j+1] + in[(i-2)*x_size+j+2] +
+ in[(i-1)*x_size+j-2] + in[(i-1)*x_size+j-1] + in[(i-1)*x_size+j] + in[(i-1)*x_size+j+1] + in[(i-1)*x_size+j+2] +
+ in[(i )*x_size+j-2] + in[(i )*x_size+j-1] + in[(i )*x_size+j] + in[(i )*x_size+j+1] + in[(i )*x_size+j+2] +
+ in[(i+1)*x_size+j-2] + in[(i+1)*x_size+j-1] + in[(i+1)*x_size+j] + in[(i+1)*x_size+j+1] + in[(i+1)*x_size+j+2] +
+ in[(i+2)*x_size+j-2] + in[(i+2)*x_size+j-1] + in[(i+2)*x_size+j] + in[(i+2)*x_size+j+1] + in[(i+2)*x_size+j+2];
+
+corner_list[n].I=x/25;
+/*corner_list[n].I=in[i*x_size+j];*/
+x = in[(i-2)*x_size+j+2] + in[(i-1)*x_size+j+2] + in[(i)*x_size+j+2] + in[(i+1)*x_size+j+2] + in[(i+2)*x_size+j+2] -
+ (in[(i-2)*x_size+j-2] + in[(i-1)*x_size+j-2] + in[(i)*x_size+j-2] + in[(i+1)*x_size+j-2] + in[(i+2)*x_size+j-2]);
+x += x + in[(i-2)*x_size+j+1] + in[(i-1)*x_size+j+1] + in[(i)*x_size+j+1] + in[(i+1)*x_size+j+1] + in[(i+2)*x_size+j+1] -
+ (in[(i-2)*x_size+j-1] + in[(i-1)*x_size+j-1] + in[(i)*x_size+j-1] + in[(i+1)*x_size+j-1] + in[(i+2)*x_size+j-1]);
+
+y = in[(i+2)*x_size+j-2] + in[(i+2)*x_size+j-1] + in[(i+2)*x_size+j] + in[(i+2)*x_size+j+1] + in[(i+2)*x_size+j+2] -
+ (in[(i-2)*x_size+j-2] + in[(i-2)*x_size+j-1] + in[(i-2)*x_size+j] + in[(i-2)*x_size+j+1] + in[(i-2)*x_size+j+2]);
+y += y + in[(i+1)*x_size+j-2] + in[(i+1)*x_size+j-1] + in[(i+1)*x_size+j] + in[(i+1)*x_size+j+1] + in[(i+1)*x_size+j+2] -
+ (in[(i-1)*x_size+j-2] + in[(i-1)*x_size+j-1] + in[(i-1)*x_size+j] + in[(i-1)*x_size+j+1] + in[(i-1)*x_size+j+2]);
+corner_list[n].dx=x/15;
+corner_list[n].dy=y/15;
+n++;
+if(n==MAX_CORNERS){
+ fprintf(stderr,"Too many corners.\n");
+ exit(1);
+ }}}}
+corner_list[n].info=7;
+}
+
+/* }}} */
+
+/* }}} */
+/* {{{ main(argc, argv) */
+
+int main(int argc, char* argv[])
+{
+/* {{{ vars */
+
+char *tcp;
+uchar *in, *bp, *mid;
+float dt=4.0;
+int *r,
+ argindex=3,
+ bt=20,
+ principle=0,
+ thin_post_proc=1,
+ three_by_three=0,
+ drawing_mode=0,
+ susan_quick=0,
+ max_no_corners=1850,
+ max_no_edges=2650,
+ mode = 0,
+ x_size, y_size;
+CORNER_LIST corner_list;
+
+/* }}} */
+
+ long ct_repeat=0;
+ long ct_repeat_max=1;
+ int ct_return=0;
+
+#ifdef OPENME
+ openme_init(NULL,NULL,NULL,0);
+ openme_callback("PROGRAM_START", NULL);
+#endif
+#ifdef XOPENME
+ xopenme_init(1,2);
+#endif
+
+ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN"));
+
+ if (argc<3)
+ usage();
+
+ get_image(argv[1],&in,&x_size,&y_size);
+
+#ifdef XOPENME
+ xopenme_add_var_i(0, " \"image_size_x\":%u", x_size);
+ xopenme_add_var_i(1, " \"image_size_y\":%u", y_size);
+#endif
+
+// printf("Size X=%u Size Y=%u\n", x_size, y_size);
+ /* FGG - changing dataset size */
+// x_size=8;
+// y_size=8;
+// printf("Size X=%u Size Y=%u\n", x_size, y_size);
+
+ /* {{{ look at options */
+
+ while (argindex < argc)
+ {
+ tcp = argv[argindex];
+ if (*tcp == '-')
+ switch (*++tcp)
+ {
+ case 's': /* smoothing */
+ mode=0;
+ break;
+ case 'e': /* edges */
+ mode=1;
+ break;
+ case 'c': /* corners */
+ mode=2;
+ break;
+ case 'p': /* principle */
+ principle=1;
+ break;
+ case 'n': /* thinning post processing */
+ thin_post_proc=0;
+ break;
+ case 'b': /* simple drawing mode */
+ drawing_mode=1;
+ break;
+ case '3': /* 3x3 flat mask */
+ three_by_three=1;
+ break;
+ case 'q': /* quick susan mask */
+ susan_quick=1;
+ break;
+ case 'd': /* distance threshold */
+ if (++argindex >= argc){
+ printf ("No argument following -d\n");
+ exit(0);}
+ dt=atof(argv[argindex]);
+ if (dt<0) three_by_three=1;
+ break;
+ case 't': /* brightness threshold */
+ if (++argindex >= argc){
+ printf ("No argument following -t\n");
+ exit(0);}
+ bt=atoi(argv[argindex]);
+ break;
+ }
+ else
+ usage();
+ argindex++;
+ }
+
+ if ( (principle==1) && (mode==0) )
+ mode=1;
+
+/* }}} */
+ /* {{{ main processing */
+
+#ifdef OPENME
+ openme_callback("KERNEL_START", NULL);
+#endif
+#ifdef XOPENME
+ xopenme_clock_start(0);
+#endif
+
+  for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
+<details>
+<summary>Click if you want to use Python virtual environment</summary>
+
+We suggest installing a Python virtual environment via CM, though it is not strictly necessary
+(CM can automatically detect and reuse your Python installation and environments):
+```bash
+cm run script "install python-venv" --name=loadgen
+```
+
+You can also install a specific version of Python on your system via:
+```bash
+cm run script "install python-venv" --name=loadgen --version=3.10.7
+```
+
+By default, CM asks you to select one of all detected and installed Python versions,
+including the one above, whenever it runs a script with a Python dependency. To avoid that, you
+can set the following environment variable to the name of the current virtual environment:
+
+```bash
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen"
+```
+
+The `--adr` flag stands for "Add to all Dependencies Recursively": it applies the given option to all sub-dependencies on other CM scripts.
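+
+For example, a minimal sketch (assuming the target script declares a dependency named `python`, as the LoadGen scripts below do), pinning a minimum Python version for all sub-dependencies:
+
+```bash
+cm run script "get mlperf loadgen" --adr.python.version_min=3.9
+```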
+
+
+</details>
+
+### Install dependencies via CM (optional)
+
+
+<details>
+<summary>Click if you want to install specific versions of dependencies</summary>
+
+You can skip this sub-section if you want CM to automatically detect an already installed
+ONNX Runtime on your system. Otherwise, follow the next steps to install the latest or a specific
+version of ONNX Runtime.
+
+
+### Download LoadGen sources from MLPerf inference benchmark
+
+```bash
+cm run script "get mlperf inference src" --version=r3.1
+```
+
+### Install MLPerf LoadGen
+We can now install LoadGen via CM while forcing the compiler dependency to GCC:
+
+```bash
+cm run script "get mlperf loadgen"
+```
+
+### ONNX, CPU
+
+```bash
+cm run script "get generic-python-lib _onnxruntime"
+```
+
+or
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version=1.13.1
+```
+
+or
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version_min=1.10.0
+```
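+
+or combine a minimum and a maximum (a sketch; `--version_max` follows the same convention as `--version_min`):
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version_min=1.10.0 --version_max=1.16.1
+```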
+</details>
+
+### Benchmark standard MLPerf model
+
+You can use CM variations prefixed by `_` to benchmark an official MLPerf model
+(`_resnet50` or `_retinanet`):
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime _retinanet" --samples=5
+cmr "python app loadgen-generic _onnxruntime _resnet50"
+```
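+
+Flags from this script's meta (`_cm.yaml`) can further adjust the run; for example, an illustrative sketch using the `samples` and `loadgen_expected_qps` inputs:
+
+```bash
+cmr "python app loadgen-generic _onnxruntime _resnet50" --samples=10 --loadgen_expected_qps=50
+```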
+
+Normally, you should see a performance report from LoadGen similar to the following:
+
+
+
+
+<details>
+<summary>Click to open</summary>
+
+```bash
+
+2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Model: /home/gfursin/CM/repos/local/cache/9c825a0a06fb48e2/resnet50_v1.onnx
+2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Runner: inline, Concurrency: 4
+2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Results: results/resnet50_v1.onnx/inline
+2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Test Started
+2022-12-06 16:51:39,399 INFO MainThread - loadgen.harness load_query_samples: Loaded 100 samples
+2022-12-06 16:51:55,723 INFO MainThread - loadgen.harness issue_query: Queries issued 550
+2022-12-06 16:51:55,725 INFO MainThread - loadgen.harness flush_queries: Queries flushed
+2022-12-06 16:51:55,731 INFO MainThread - loadgen.harness unload_query_samples: Unloaded samples
+================================================
+MLPerf Results Summary
+================================================
+SUT name : PySUT
+Scenario : Offline
+Mode : PerformanceOnly
+Samples per second: 33.6903
+Result is : VALID
+ Min duration satisfied : Yes
+ Min queries satisfied : Yes
+ Early stopping satisfied: Yes
+
+================================================
+Additional Stats
+================================================
+Min latency (ns) : 16325180169
+Max latency (ns) : 16325180169
+Mean latency (ns) : 16325180169
+50.00 percentile latency (ns) : 16325180169
+90.00 percentile latency (ns) : 16325180169
+95.00 percentile latency (ns) : 16325180169
+97.00 percentile latency (ns) : 16325180169
+99.00 percentile latency (ns) : 16325180169
+99.90 percentile latency (ns) : 16325180169
+
+================================================
+Test Parameters Used
+================================================
+samples_per_query : 550
+target_qps : 50
+target_latency (ns): 0
+max_async_queries : 1
+min_duration (ms): 10000
+max_duration (ms): 0
+min_query_count : 1
+max_query_count : 0
+qsl_rng_seed : 0
+sample_index_rng_seed : 0
+schedule_rng_seed : 0
+accuracy_log_rng_seed : 0
+accuracy_log_probability : 0
+accuracy_log_sampling_target : 0
+print_timestamps : 0
+performance_issue_unique : 0
+performance_issue_same : 0
+performance_issue_same_index : 0
+performance_sample_count : 100
+
+No warnings encountered during test.
+
+No errors encountered during test.
+2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Observed QPS: 33.6903
+2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Result: VALID
+2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Test Completed
+
+ - Running postprocess ...
+ - running time of script "app,loadgen,generic,loadgen-generic,python": 370.87 sec.
+
+```
+
+</details>
+
+
+### Benchmark custom model
+
+You can also specify any custom ONNX model file as follows:
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime" --modelpath=
+```
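+
+For instance (the path below is hypothetical, shown only for illustration):
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime" --modelpath=$HOME/models/my_model.onnx --samples=10
+```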
+
+### Benchmark Hugging Face model
+
+```bash
+cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx
+```
+
+*See more examples of downloading Hugging Face models via CM [here](../get-ml-model-huggingface-zoo/README-extra.md).*
+
+### Benchmark using ONNX CUDA
+
+```bash
+cm rm cache -f
+cmr "python app loadgen-generic _onnxruntime _cuda _retinanet" --quiet
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx
+```
+
+These cases worked on Windows and Linux but may require a GPU with more than 8GB of memory:
+```bash
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-13b-ONNX" --adr.hf-downloader.model_filename=FP32/LlamaV2_13B_float32.onnx --adr.hf-downloader.full_subfolder=FP32 --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.Intel/gpt-j-6B-int8-static" --adr.hf-downloader.model_filename=model.onnx --adr.hf-downloader.full_subfolder=. --samples=2
+```
+
+TBD: some cases that are not yet fully supported (data types, input mismatch, etc.):
+```bash
+cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.runwayml/stable-diffusion-v1-5" --adr.hf-downloader.revision=onnx --adr.hf-downloader.model_filename=unet/model.onnx,unet/weights.pb --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.microsoft/Mistral-7B-v0.1-onnx" --adr.hf-downloader.model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-7b-ONNX" --adr.hf-downloader.model_filename=FP16/LlamaV2_7B_float16.onnx --adr.hf-downloader.full_subfolder=FP16 --samples=2
+```
+
+### Other variations and flags
+
+You can obtain help about flags and variations from the command line:
+
+```bash
+cm run script "python app loadgen-generic" --help
+
+Available variations:
+
+ _cpu
+ _cuda
+ _custom
+ _custom,huggingface
+ _huggingface
+ _model-stub.#
+ _onnxruntime
+ _pytorch
+ _resnet50
+ _retinanet
+
+Available flags mapped to environment variables:
+
+ --concurrency -> --env.CM_MLPERF_CONCURRENCY
+ --ep -> --env.CM_MLPERF_EXECUTION_PROVIDER
+ --execmode -> --env.CM_MLPERF_EXEC_MODE
+ --interop -> --env.CM_MLPERF_INTEROP
+ --intraop -> --env.CM_MLPERF_INTRAOP
+ --modelpath -> --env.CM_ML_MODEL_FILE_WITH_PATH
+ --output_dir -> --env.CM_MLPERF_OUTPUT_DIR
+ --runner -> --env.CM_MLPERF_RUNNER
+ --samples -> --env.CM_MLPERF_LOADGEN_SAMPLES
+ --scenario -> --env.CM_MLPERF_LOADGEN_SCENARIO
+
+```
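+
+A sketch combining several of these flags (values are illustrative, not tuned):
+
+```bash
+cmr "python app loadgen-generic _onnxruntime _resnet50" --ep=CPUExecutionProvider --execmode=sequential --intraop=4 --interop=1
+```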
+
+## Running this app via Docker
+
+```bash
+cm docker script "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx --samples=2 --output_dir=new_results --docker_cm_repo=ctuning@mlcommons-ck
+```
+
+## Tuning CPU performance via CM experiment
+
+```bash
+cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet
+cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet
+```
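+
+Each `{{CM_OPT_INTRAOP{[1,2,4]}}}` placeholder tells `cm run experiment` to sweep the listed values, so each command above explores a 3x3 grid of intra-op/inter-op thread counts and records one experiment entry per combination.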
+
+
+## Developers
+
+* [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal)
+* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh)
+* [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## Get in touch
+
+* [MLCommons Task Force on Automation and Reproducibility](../../../docs/taskforce.md)
+* [Public Discord server](https://discord.gg/JjWNWXKxwT)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md
new file mode 100644
index 0000000000..1e00049c69
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml
new file mode 100644
index 0000000000..3e5fe56e12
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml
@@ -0,0 +1,326 @@
+# Identification of this CM script
+alias: app-loadgen-generic-python
+uid: d3d949cc361747a6
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular MLPerf inference benchmark pipeline"
+
+developers: "[Gaz Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)"
+
+
+# User-friendly tags to find this CM script
+tags:
+ - app
+ - loadgen
+ - generic
+ - loadgen-generic
+ - python
+
+tags_help: "python app generic loadgen"
+
+
+# Default environment
+default_env:
+ CM_MLPERF_EXECUTION_MODE: parallel
+ CM_MLPERF_BACKEND: onnxruntime
+
+# Map script inputs to environment variables
+input_mapping:
+ modelpath: CM_ML_MODEL_FILE_WITH_PATH
+ modelcodepath: CM_ML_MODEL_CODE_WITH_PATH
+ modelcfgpath: CM_ML_MODEL_CFG_WITH_PATH
+ modelcfg: CM_ML_MODEL_CFG
+ modelsamplepath: CM_ML_MODEL_SAMPLE_WITH_PATH
+ output_dir: CM_MLPERF_OUTPUT_DIR
+ scenario: CM_MLPERF_LOADGEN_SCENARIO
+ runner: CM_MLPERF_RUNNER
+ concurrency: CM_MLPERF_CONCURRENCY
+ ep: CM_MLPERF_EXECUTION_PROVIDER
+ intraop: CM_MLPERF_INTRAOP
+ interop: CM_MLPERF_INTEROP
+ execmode: CM_MLPERF_EXEC_MODE
+ samples: CM_MLPERF_LOADGEN_SAMPLES
+ loadgen_expected_qps: CM_MLPERF_LOADGEN_EXPECTED_QPS
+ loadgen_duration_sec: CM_MLPERF_LOADGEN_DURATION_SEC
+
+# New env keys exported from this script
+new_env_keys:
+ - CM_MLPERF_*
+
+# Dependencies on other CM scripts
+
+deps:
+
+ # Detect host OS features
+ - tags: detect,os
+
+ # Detect host CPU features
+ - tags: detect,cpu
+
+ # Get Python
+ - tags: get,python3
+ names:
+ - python
+ - python3
+
+ # Extra package
+ - tags: get,generic-python-lib,_psutil
+ - tags: get,generic-python-lib,_package.numpy
+ version_max: "1.99.99"
+
+ # Detect CUDA if required
+ - tags: get,cuda
+ enable_if_env:
+ CM_MLPERF_DEVICE:
+ - gpu
+
+ # Install loadgen
+ - tags: get,loadgen
+ names:
+ - loadgen
+
+ ########################################################################
+ # Install ML engines via CM
+ # ONNX
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - onnxruntime
+ CM_MLPERF_DEVICE:
+ - cpu
+ tags: get,generic-python-lib,_onnxruntime
+ names:
+ - onnxruntime
+
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - onnxruntime
+ CM_MLPERF_DEVICE:
+ - gpu
+ tags: get,generic-python-lib,_onnxruntime_gpu
+ names:
+ - onnxruntime
+
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - onnxruntime
+ tags: get,generic-python-lib,_onnx
+ names:
+ - onnx
+
+ ########################################################################
+ # Install ML engines via CM
+ # PyTorch
+
+ # CPU
+
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - pytorch
+ CM_MLPERF_DEVICE:
+ - cpu
+ tags: get,generic-python-lib,_torch
+ names:
+ - torch
+
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - pytorch
+ CM_MLPERF_DEVICE:
+ - cpu
+ tags: get,generic-python-lib,_torchvision
+ names:
+ - torchvision
+
+ # CUDA/GPU
+
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - pytorch
+ CM_MLPERF_DEVICE:
+ - gpu
+ tags: get,generic-python-lib,_torch_cuda
+ names:
+ - torch
+
+ - enable_if_env:
+ CM_MLPERF_BACKEND:
+ - pytorch
+ CM_MLPERF_DEVICE:
+ - gpu
+ tags: get,generic-python-lib,_torchvision_cuda
+ names:
+ - torchvision
+
+
+
+ ########################################################################
+ # Install MLPerf models
+ - enable_if_env:
+ CM_MODEL:
+ - resnet50
+ tags: get,ml-model,resnet50,_onnx
+
+ - enable_if_env:
+ CM_MODEL:
+ - retinanet
+ tags: get,ml-model,retinanet,_onnx,_fp32
+
+
+
+
+
+# Customize this CM script
+variations:
+
+  pytorch:
+    group: backend
+    env:
+      CM_MLPERF_BACKEND: pytorch
+
+  onnxruntime:
+    group: backend
+    default: true
+    env:
+      CM_MLPERF_BACKEND: onnxruntime
+
+
+
+  cpu:
+    group: device
+    default: true
+    env:
+      CM_MLPERF_DEVICE: cpu
+      CM_MLPERF_EXECUTION_PROVIDER: CPUExecutionProvider
+
+  cuda:
+    docker:
+      all_gpus: 'yes'
+      base_image: nvcr.io/nvidia/pytorch:24.03-py3
+    group: device
+    env:
+      CM_MLPERF_DEVICE: gpu
+      CM_MLPERF_EXECUTION_PROVIDER: CUDAExecutionProvider
+
+
+
+  retinanet:
+    group: models
+    env:
+      CM_MODEL: retinanet
+
+  resnet50:
+    group: models
+    env:
+      CM_MODEL: resnet50
+
+  custom:
+    group: models
+    env:
+      CM_MODEL: custom
+
+
+
+ huggingface:
+ env:
+ CM_CUSTOM_MODEL_SOURCE: huggingface
+
+ custom,huggingface:
+ deps:
+ - tags: get,ml-model,huggingface
+ names:
+ - hf-downloader
+ update_tags_from_env_with_prefix:
+ "_model-stub.":
+ - CM_ML_MODEL_STUB
+
+ model-stub.#:
+ env:
+ CM_ML_MODEL_STUB: "#"
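+    # the '#' suffix is a wildcard: '_model-stub.org/name' sets CM_ML_MODEL_STUB to 'org/name'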
+
+
+ cmc:
+ env:
+ CM_CUSTOM_MODEL_CMC: yes
+
+
+ custom,cmc:
+ deps:
+ - tags: get,ml-model,cmc
+ names:
+ - cmc-model
+
+
+input_description:
+ modelpath:
+ desc: Full path to file with model weights
+ modelcodepath:
+ desc: (for PyTorch models) Full path to file with model code and cmc.py
+ modelcfgpath:
+ desc: (for PyTorch models) Full path to JSON file with model cfg
+ modelsamplepath:
+ desc: (for PyTorch models) Full path to file with model sample in pickle format
+ ep:
+ desc: ONNX Execution provider
+ scenario:
+ desc: MLPerf LoadGen scenario
+ samples:
+ desc: Number of samples
+ default: 2
+ runner:
+ desc: MLPerf runner
+ execmode:
+ desc: MLPerf exec mode
+ output_dir:
+ desc: MLPerf output directory
+ concurrency:
+ desc: MLPerf concurrency
+ intraop:
+ desc: MLPerf intra op threads
+ interop:
+ desc: MLPerf inter op threads
+
+
+docker:
+ skip_run_cmd: 'no'
+ input_paths:
+ - modelpath
+ - modelsamplepath
+ - env.CM_ML_MODEL_FILE_WITH_PATH
+ - env.CM_ML_MODEL_CODE_WITH_PATH
+ - output_dir
+ - repro_dir
+ skip_input_for_fake_run:
+ - modelpath
+ - modelsamplepath
+ - env.CM_ML_MODEL_FILE_WITH_PATH
+ - env.CM_ML_MODEL_CODE_WITH_PATH
+ - output_dir
+ - scenario
+ - runner
+ - repro_dir
+ - concurrency
+ - intraop
+ - interop
+ - execmode
+ - samples
+ - modelcfg.num_classes
+ - modelcfg.config
+ - repro
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py
new file mode 100644
index 0000000000..34720c052d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py
@@ -0,0 +1,117 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+# Developer: Grigori Fursin
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ if 'CM_ML_MODEL_FILE_WITH_PATH' not in env:
+ return {
+ 'return': 1, 'error': 'Please select a variation specifying the model to run'}
+
+ run_opts = env.get('CM_RUN_OPTS', '')
+
+ if env.get('CM_MLPERF_BACKEND', '') != '':
+ run_opts += " -b " + env['CM_MLPERF_BACKEND']
+
+ if env.get('CM_MLPERF_RUNNER', '') != '':
+ run_opts += " -r " + env['CM_MLPERF_RUNNER']
+
+ if env.get('CM_MLPERF_CONCURRENCY', '') != '':
+ run_opts += " --concurrency " + env['CM_MLPERF_CONCURRENCY']
+
+ if env.get('CM_MLPERF_EXECUTION_PROVIDER', '') != '':
+ run_opts += " --ep " + env['CM_MLPERF_EXECUTION_PROVIDER']
+
+ if env.get('CM_MLPERF_INTRAOP', '') != '':
+ run_opts += " --intraop " + env['CM_MLPERF_INTRAOP']
+
+ if env.get('CM_MLPERF_INTEROP', '') != '':
+ run_opts += " --interop " + env['CM_MLPERF_INTEROP']
+
+    if env.get('CM_MLPERF_EXECUTION_MODE', '') != '':
+        run_opts += " --execmode " + env['CM_MLPERF_EXECUTION_MODE']
+
+ if env.get('CM_MLPERF_LOADGEN_SAMPLES', '') != '':
+ run_opts += " --samples " + env['CM_MLPERF_LOADGEN_SAMPLES']
+
+ if env.get('CM_MLPERF_LOADGEN_EXPECTED_QPS', '') != '':
+ run_opts += " --loadgen_expected_qps " + \
+ env['CM_MLPERF_LOADGEN_EXPECTED_QPS']
+
+ if env.get('CM_MLPERF_LOADGEN_DURATION_SEC', '') != '':
+ run_opts += " --loadgen_duration_sec " + \
+ env['CM_MLPERF_LOADGEN_DURATION_SEC']
+
+ if env.get('CM_MLPERF_OUTPUT_DIR', '') != '':
+ run_opts += " --output " + env['CM_MLPERF_OUTPUT_DIR']
+
+ if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '':
+ run_opts += " --model_code " + env['CM_ML_MODEL_CODE_WITH_PATH']
+
+ if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '':
+ run_opts += " --model_cfg " + env['CM_ML_MODEL_CFG_WITH_PATH']
+ else:
+ # Check cfg from command line
+ cfg = env.get('CM_ML_MODEL_CFG', {})
+ if len(cfg) > 0:
+ del (env['CM_ML_MODEL_CFG'])
+
+            import json
+            import tempfile
+
+            # write cfg to a temporary JSON file (removed again in postprocess)
+            fd, tfile = tempfile.mkstemp(suffix='.json', prefix='cm-cfg-')
+            os.close(fd)
+
+ with open(tfile, 'w') as fd:
+ json.dump(cfg, fd)
+
+ env['CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile
+
+ run_opts += " --model_cfg " + tfile
+
+ if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '':
+ run_opts += " --model_sample_pickle " + \
+ env['CM_ML_MODEL_SAMPLE_WITH_PATH']
+
+ # Add path to file model weights at the end of command line
+
+ run_opts += ' ' + env['CM_ML_MODEL_FILE_WITH_PATH']
+
+ env['CM_RUN_OPTS'] = run_opts
+
+ print('')
+ print('Assembled flags: {}'.format(run_opts))
+ print('')
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ env = i['env']
+
+ tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '')
+
+ if tfile != '' and os.path.isfile(tfile):
+ os.remove(tfile)
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat
new file mode 100644
index 0000000000..3d4b5d58b3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat
@@ -0,0 +1,4 @@
+rem native script
+
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\main.py %CM_RUN_OPTS%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh
new file mode 100644
index 0000000000..2a13312f07
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${CM_RUN_OPTS}
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py
new file mode 100644
index 0000000000..371f44ffbe
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py
@@ -0,0 +1,92 @@
+import typing
+
+import numpy as np
+import onnx
+import onnxruntime as ort
+
+from loadgen.model import Model, ModelFactory, ModelInput, ModelInputSampler
+
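+# keep a handle to the builtin input(), since the name 'input' is reused below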
+xinput = input
+
+ONNX_TO_NP_TYPE_MAP = {
+ "tensor(bool)": bool,
+ "tensor(int)": np.int32,
+ "tensor(int32)": np.int32,
+ "tensor(int8)": np.int8,
+ "tensor(uint8)": np.uint8,
+ "tensor(int16)": np.int16,
+ "tensor(uint16)": np.uint16,
+ "tensor(uint64)": np.uint64,
+ "tensor(int64)": np.int64,
+ "tensor(float16)": np.float16,
+ "tensor(float)": np.float32,
+ "tensor(double)": np.float64,
+ "tensor(string)": np.string_,
+}
+
+
+class XModel(Model):
+ def __init__(self, session: ort.InferenceSession):
+ assert session is not None
+ self.session = session
+
+ def predict(self, input: ModelInput):
+ output = self.session.run(None, input)
+ return output
+
+
+class XModelFactory(ModelFactory):
+ def __init__(
+ self,
+ model_path: str,
+ execution_provider="CPUExecutionProvider",
+ execution_mode="",
+ intra_op_threads=0,
+ inter_op_threads=0,
+ model_code='', # Not used here
+ model_cfg={}, # Not used here
+ model_sample_pickle='' # Not used here
+ ):
+ self.model_path = model_path
+ self.execution_provider = execution_provider
+ self.session_options = ort.SessionOptions()
+ if execution_mode.lower() == "sequential":
+ self.session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
+ elif execution_mode.lower() == "parallel":
+ self.session_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL
+ self.session_options.intra_op_num_threads = intra_op_threads
+ self.session_options.inter_op_num_threads = inter_op_threads
+
+ def create(self) -> Model:
+ print('Loading model: {}'.format(self.model_path))
+# model = onnx.load(self.model_path)
+ session_eps = [self.execution_provider]
+ session = ort.InferenceSession(
+ # model.SerializeToString(), self.session_options,
+ # providers=session_eps
+ self.model_path, self.session_options, providers=session_eps
+ )
+ return XModel(session)
+
+
+class XModelInputSampler(ModelInputSampler):
+ def __init__(self, model_factory: XModelFactory):
+ model = model_factory.create()
+ input_defs = model.session.get_inputs()
+ self.inputs: typing.Dict[str,
+ typing.Tuple[np.dtype,
+ typing.List[int]]] = dict()
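+        # record (dtype, shape) per model input; dynamic dimensions
+        # (None or symbolic strings) are pinned to 1 for random sampling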
+ for input in input_defs:
+ input_name = input.name
+ input_type = ONNX_TO_NP_TYPE_MAP[input.type]
+ input_dim = [
+ 1 if (x is None or (isinstance(x, str))) else x for x in input.shape
+ ]
+ self.inputs[input_name] = (input_type, input_dim)
+
+ def sample(self, id_: int) -> ModelInput:
+ input = dict()
+ for name, spec in self.inputs.items():
+ val = np.random.random_sample(spec[1]).astype(spec[0])
+ input[name] = val
+ return input
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py
new file mode 100644
index 0000000000..6fb7160282
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py
@@ -0,0 +1,132 @@
+# Developer: Grigori Fursin
+
+import typing
+import importlib
+import os
+import psutil
+
+import utils
+
+import numpy as np
+
+import torch
+
+from loadgen.model import Model, ModelFactory, ModelInput, ModelInputSampler
+
+
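+# keep a handle to the builtin input(), since the name 'input' is reused below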
+xinput = input
+
+
+class XModel(Model):
+ def __init__(self, session):
+ assert session is not None
+ self.session = session
+
+ def predict(self, input: ModelInput):
+
+ print('')
+ utils.print_host_memory_use('Host memory used')
+
+ print('Running inference ...')
+ with torch.no_grad():
+ output = self.session(input)
+
+ utils.print_host_memory_use('Host memory used')
+
+ return output
+
+
+class XModelFactory(ModelFactory):
+ def __init__(
+ self,
+ model_path: str,
+ execution_provider="CPUExecutionProvider",
+ execution_mode="",
+ intra_op_threads=0,
+ inter_op_threads=0,
+ model_code='',
+ model_cfg={},
+ model_sample_pickle=''
+ ):
+
+ self.model_path = model_path
+ self.model_code = model_code
+ self.model_cfg = model_cfg
+ self.model_sample_pickle = model_sample_pickle
+ self.execution_provider = execution_provider
+
+ def create(self) -> Model:
+ print('')
+ print('Loading model: {}'.format(self.model_path))
+
+ if self.execution_provider == 'CPUExecutionProvider':
+ torch_provider = 'cpu'
+ elif self.execution_provider == 'CUDAExecutionProvider':
+ torch_provider = 'cuda'
+ if not torch.cuda.is_available():
+ raise Exception(
+ 'Error: CUDA is forced but not available or installed in PyTorch!')
+ else:
+ raise Exception(
+ 'Error: execution provider is unknown ({})!'.format(
+ self.execution_provider))
+
+ checkpoint = torch.load(self.model_path,
+ map_location=torch.device(torch_provider))
+
+ if self.model_code == '':
+ raise Exception('Error: path to model code was not provided!')
+
+ if self.model_sample_pickle == '':
+ raise Exception(
+ 'Error: path to model sample pickle was not provided!')
+
+ # Load sample
+ import pickle
+ with open(self.model_sample_pickle, 'rb') as handle:
+ self.input_sample = pickle.load(handle)
+
+        # Check that the model code provides a CM connector (cmc.py)
+ cm_model_module = os.path.join(self.model_code, 'cmc.py')
+ if not os.path.isfile(cm_model_module):
+ raise Exception(
+                'cmc.py interface for a PyTorch model was not found in {}'.format(
+ self.model_code))
+
+ print('')
+ print('Collective Mind Connector for the model found: {}'.format(
+ cm_model_module))
+
+ # Load CM interface for the model
+ import sys
+ sys.path.insert(0, self.model_code)
+ model_module = importlib.import_module('cmc')
+ del (sys.path[0])
+
+ # Init model
+ if len(self.model_cfg) > 0:
+ print('Model cfg: {}'.format(self.model_cfg))
+
+ r = model_module.model_init(checkpoint, self.model_cfg)
+ if r['return'] > 0:
+ raise Exception('Error: {}'.format(r['error']))
+
+ model = r['model']
+
+ if torch_provider == 'cuda':
+ model.cuda()
+
+ model.eval()
+
+ return XModel(model)
+
+
+class XModelInputSampler(ModelInputSampler):
+ def __init__(self, model_factory: XModelFactory):
+ model = model_factory.create()
+ self.input_sample = model_factory.input_sample
+ return
+
+ def sample(self, id_: int) -> ModelInput:
+ input = self.input_sample
+ return input
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py
new file mode 100644
index 0000000000..a8fdd4e86b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py
@@ -0,0 +1,77 @@
+import abc
+import contextlib
+import logging
+import typing
+
+import mlperf_loadgen
+
+from loadgen.model import ModelInput, ModelInputSampler
+
+logger = logging.getLogger(__name__)
+
+
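+# LoadGen query ids mapped to model inputs / prediction results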
+QueryInput = typing.Dict[int, ModelInput]
+QueryResult = typing.Dict[int, typing.Any]
+
+
+class ModelRunner(contextlib.AbstractContextManager):
+ @abc.abstractmethod
+ def issue_query(self, query: QueryInput) -> typing.Optional[QueryResult]:
+ pass
+
+ # Optional method to flush pending queries
+ def flush_queries(self) -> typing.Optional[QueryResult]:
+ pass
+
+ def __exit__(self, _exc_type, _exc_value, _traceback):
+ logger.info(f"{self} : Exited")
+ return None
+
+
+class Harness:
+ def __init__(self, sampler: ModelInputSampler, runner: ModelRunner):
+ self.sampler = sampler
+ self.runner = runner
+ self.samples = None
+
+ def load_query_samples(self, query_samples):
+ assert self.samples is None
+ self.samples = dict()
+ for query_id in query_samples:
+ self.samples[query_id] = self.sampler.sample(query_id)
+ logger.info(f"Loaded {len(self.samples)} samples")
+
+ def unload_query_samples(self, _query_samples):
+ assert self.samples is not None
+        logger.info("Unloaded samples")
+ self.samples = None
+
+ def issue_query(self, query_samples):
+ query_input = dict()
+ for q in query_samples:
+ # logger.info(f"Query Id: {q.id}, SampleIndex: {q.index}")
+ input = self.samples[q.index]
+ query_input[q.id] = input
+ result = self.runner.issue_query(query_input)
+ logger.info(f"Queries issued {len(query_input)}")
+ if result is not None:
+ self._complete_query(result)
+
+ # Called after the last call to issue queries in a series is made.
+ # Client can use this to flush any deferred queries rather than waiting
+ # for a timeout.
+ def flush_queries(self):
+ result = self.runner.flush_queries()
+        logger.info("Queries flushed")
+ if result is not None:
+ self._complete_query(result)
+
+ def _complete_query(self, result: QueryResult):
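+        # performance-only harness: responses carry no payload (data=0, size=0);
+        # LoadGen only needs the completion signal for each query id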
+ responses = []
+ for query_id, _query_result in result.items():
+ response_data, response_size = 0, 0
+ response = mlperf_loadgen.QuerySampleResponse(
+ query_id, response_data, response_size
+ )
+ responses.append(response)
+ mlperf_loadgen.QuerySamplesComplete(responses)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py
new file mode 100644
index 0000000000..8bb7dbf04c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py
@@ -0,0 +1,24 @@
+import abc
+import typing
+
+import numpy as np
+
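+# A model input maps input tensor names to numpy arrays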
+ModelInput = typing.Dict[str, np.ndarray]
+
+
+class Model(abc.ABC):
+ @abc.abstractmethod
+ def predict(self, input: ModelInput) -> typing.Any:
+ pass
+
+
+class ModelFactory(abc.ABC):
+ @abc.abstractmethod
+ def create(self) -> Model:
+ pass
+
+
+class ModelInputSampler(abc.ABC):
+ @abc.abstractmethod
+ def sample(self, id: int) -> ModelInput:
+ pass
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py
new file mode 100644
index 0000000000..9c813a0278
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py
@@ -0,0 +1,188 @@
+import abc
+import concurrent.futures
+import logging
+import multiprocessing
+import multiprocessing.pool
+import threading
+import typing
+
+from loadgen.harness import ModelRunner, QueryInput, QueryResult
+from loadgen.model import Model, ModelFactory, ModelInput
+
+logger = logging.getLogger(__name__)
+
+# Runner implementations
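+#
+# ModelRunnerInline runs predictions synchronously in the caller's thread and
+# returns results from issue_query; the pool-based runners submit work to
+# thread/process pools and hand results back when LoadGen flushes queries.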
+
+
+class ModelRunnerInline(ModelRunner):
+ def __init__(self, model_factory: ModelFactory):
+ self.model = model_factory.create()
+
+ def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]:
+ result = dict()
+ for query_id, model_input in queries.items():
+ output = self.model.predict(model_input)
+ result[query_id] = output
+ return result
+
+
+class ModelRunnerPoolExecutor(ModelRunner):
+ def __init__(self):
+ self.executor: concurrent.futures.Executor = None
+ self.futures = None
+
+ def __exit__(self, _exc_type, _exc_value, _traceback):
+ if self.executor:
+ self.executor.shutdown(True)
+ return super().__exit__(_exc_type, _exc_value, _traceback)
+
+ def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]:
+ self.futures = dict()
+ predictor_fn = self.get_predictor()
+ for query_id, model_input in queries.items():
+ f = self.executor.submit(predictor_fn, model_input)
+ self.futures[f] = query_id
+ return None
+
+ def flush_queries(self) -> typing.Optional[QueryResult]:
+ result = dict()
+ for future in concurrent.futures.as_completed(self.futures.keys()):
+ query_id = self.futures[future]
+ query_result = future.result()
+ result[query_id] = query_result
+ return result
+
+ @abc.abstractmethod
+ def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+ pass
+
+
+class ModelRunnerThreadPoolExecutor(ModelRunnerPoolExecutor):
+ def __init__(self, model_factory: ModelFactory, max_concurrency: int):
+ super().__init__()
+ self.model = model_factory.create()
+ self.max_concurrency = max_concurrency
+
+ def __enter__(self):
+ self.executor = concurrent.futures.ThreadPoolExecutor(
+ max_workers=self.max_concurrency, thread_name_prefix="LoadGen"
+ )
+ return self
+
+ def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+ return self.model.predict
+
+
+class ModelRunnerThreadPoolExecutorWithTLS(ModelRunnerPoolExecutor):
+ tls: threading.local
+
+ def __init__(self, model_factory: ModelFactory, max_concurrency: int):
+ super().__init__()
+ self.model_factory = model_factory
+ self.max_concurrency = max_concurrency
+
+ def __enter__(self):
+ self.executor = concurrent.futures.ThreadPoolExecutor(
+ max_workers=self.max_concurrency,
+ thread_name_prefix="LoadGen",
+ initializer=ModelRunnerThreadPoolExecutorWithTLS._tls_init,
+ initargs=(self.model_factory,),
+ )
+ return self
+
+ def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+ return ModelRunnerThreadPoolExecutorWithTLS._tls_predict
+
+ @staticmethod
+ def _tls_init(model_factory: ModelFactory):
+ ModelRunnerThreadPoolExecutorWithTLS.tls = threading.local()
+ ModelRunnerThreadPoolExecutorWithTLS.tls.model = model_factory.create()
+
+ @staticmethod
+ def _tls_predict(input: ModelInput):
+ return ModelRunnerThreadPoolExecutorWithTLS.tls.model.predict(input)
+
+
+class ModelRunnerProcessPoolExecutor(ModelRunnerPoolExecutor):
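+    # the model is created once and stored on the class so that worker processes
+    # inherit it; note this relies on fork-style process creation (Linux default)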
+ _model: Model
+
+ def __init__(self, model_factory: ModelFactory, max_concurrency: int):
+ super().__init__()
+ self.max_concurrency = max_concurrency
+ ModelRunnerProcessPoolExecutor._model = model_factory.create()
+
+ def __enter__(self):
+ self.executor = concurrent.futures.ProcessPoolExecutor(
+ max_workers=self.max_concurrency
+ )
+ return self
+
+ def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+ return ModelRunnerProcessPoolExecutor._predict
+
+ @staticmethod
+ def _predict(input: ModelInput):
+ result = ModelRunnerProcessPoolExecutor._model.predict(input)
+ return result
+
+
+class ModelRunnerMultiProcessingPool(ModelRunner):
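+    # shared at class level for the same fork-inheritance reason as above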
+ _model: Model
+
+ def __init__(
+ self,
+ model_factory: ModelFactory,
+ max_concurrency: int,
+ ):
+ self.max_concurrency = max_concurrency
+        self.task: typing.Optional[multiprocessing.pool.ApplyResult] = None
+ ModelRunnerMultiProcessingPool._model = model_factory.create()
+
+    def __enter__(self):
+        self.pool = multiprocessing.Pool(self.max_concurrency)
+        return self
+
+ def __exit__(self, _exc_type, _exc_value, _traceback):
+ if self.pool:
+ self.pool.terminate()
+ return super().__exit__(_exc_type, _exc_value, _traceback)
+
+ def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]:
+ if hasattr(self, "tasks"):
+ assert len(self.tasks) == 0
+ for query_id, model_input in queries.items():
+ task = self.pool.apply_async(
+ ModelRunnerMultiProcessingPool._predict, (model_input,)
+ )
+ self.tasks[task] = query_id
+ else:
+ assert self.task is None
+ inputs = [
+ [query_id, model_input] for query_id, model_input in queries.items()
+ ]
+ self.task = self.pool.starmap_async(
+ ModelRunnerMultiProcessingPool._predict_with_id, inputs
+ )
+ return None
+
+ def flush_queries(self) -> typing.Optional[QueryResult]:
+ if hasattr(self, "tasks"):
+ result = dict()
+ for task, query_id in self.tasks.items():
+ task_result = task.get()
+ result[query_id] = task_result
+ return result
+ else:
+ task_result = self.task.get()
+ result = {
+ query_id: query_result for query_id,
+ query_result in task_result}
+ return result
+
+ @staticmethod
+ def _predict(input: ModelInput):
+ result = ModelRunnerMultiProcessingPool._model.predict(input)
+ return result
+
+ @staticmethod
+ def _predict_with_id(query_id: int, input: ModelInput):
+ result = ModelRunnerMultiProcessingPool._model.predict(input)
+ return (query_id, result)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py
new file mode 100644
index 0000000000..58f9291322
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py
@@ -0,0 +1,274 @@
+import argparse
+import contextlib
+import logging
+import os
+import re
+import typing
+
+import mlperf_loadgen
+import psutil
+
+from loadgen.harness import Harness, ModelRunner
+from loadgen.runners import (
+ ModelRunnerInline,
+ ModelRunnerMultiProcessingPool,
+ ModelRunnerProcessPoolExecutor,
+ ModelRunnerThreadPoolExecutor,
+ ModelRunnerThreadPoolExecutorWithTLS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def main(
+ backend: str,
+ model_path: str,
+ model_code: str,
+ model_cfg: str,
+ model_sample_pickle: str,
+ output_path: typing.Optional[str],
+ runner_name: str,
+ runner_concurrency: int,
+ execution_provider: str,
+ execution_mode: str,
+ intraop_threads: int,
+ interop_threads: int,
+ samples: int,
+ loadgen_expected_qps: float,
+ loadgen_duration_sec: float
+):
+
+ print('=====================================================================')
+
+ if backend == 'onnxruntime':
+ from backend_onnxruntime import XModelFactory
+ from backend_onnxruntime import XModelInputSampler
+ elif backend == 'pytorch':
+ from backend_pytorch import XModelFactory
+ from backend_pytorch import XModelInputSampler
+ else:
+ raise Exception("Error: backend is not recognized.")
+
+ # Load model cfg
+ model_cfg_dict = {}
+ if model_cfg != '':
+ import json
+
+ with open(model_cfg) as mc:
+ model_cfg_dict = json.load(mc)
+
+ model_factory = XModelFactory(
+ model_path,
+ execution_provider,
+ execution_mode,
+        intraop_threads,
+        interop_threads,
+ model_code,
+ model_cfg_dict,
+ model_sample_pickle
+ )
+
+ model_dataset = XModelInputSampler(model_factory)
+
+ runner: ModelRunner = None
+ if runner_name == "inline":
+ runner = ModelRunnerInline(model_factory)
+ elif runner_name == "threadpool":
+ runner = ModelRunnerThreadPoolExecutor(
+ model_factory, max_concurrency=runner_concurrency
+ )
+ elif runner_name == "threadpool+replication":
+ runner = ModelRunnerThreadPoolExecutorWithTLS(
+ model_factory, max_concurrency=runner_concurrency
+ )
+ elif runner_name == "processpool":
+ runner = ModelRunnerProcessPoolExecutor(
+ model_factory, max_concurrency=runner_concurrency
+ )
+ elif runner_name == "processpool+mp":
+ runner = ModelRunnerMultiProcessingPool(
+ model_factory, max_concurrency=runner_concurrency
+ )
+ else:
+        raise ValueError(f"Invalid runner {runner_name}")
+
+ settings = mlperf_loadgen.TestSettings()
+
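+    # this generic harness is hard-wired to the Offline scenario in
+    # performance-only mode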
+ settings.scenario = mlperf_loadgen.TestScenario.Offline
+ settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
+ settings.offline_expected_qps = loadgen_expected_qps
+ settings.min_query_count = samples
+ settings.max_query_count = samples
+ settings.min_duration_ms = loadgen_duration_sec * 1000
+ # Duration isn't enforced in offline mode
+ # Instead, it is used to determine total sample count via
+ # target_sample_count = Slack (1.1) * TargetQPS (1) * TargetDuration ()
+ # samples_per_query = Max(min_query_count, target_sample_count)
+
+ output_path = "results" if not output_path else output_path
+ output_path = os.path.join(
+ output_path,
+ os.path.basename(model_path),
+ runner_name)
+ os.makedirs(output_path, exist_ok=True)
+
+ output_settings = mlperf_loadgen.LogOutputSettings()
+ output_settings.outdir = output_path
+ output_settings.copy_summary_to_stdout = True
+
+ log_settings = mlperf_loadgen.LogSettings()
+ log_settings.log_output = output_settings
+ log_settings.enable_trace = False
+
+ logger.info(f"Model: {model_path}")
+ logger.info(f"Runner: {runner_name}, Concurrency: {runner_concurrency}")
+ logger.info(f"Results: {output_path}")
+
+ with contextlib.ExitStack() as stack:
+ stack.enter_context(runner)
+ harness = Harness(model_dataset, runner)
+
+        query_sample_library = mlperf_loadgen.ConstructQSL(
+ samples, # Total sample count
+ samples, # Num to load in RAM at a time
+ harness.load_query_samples,
+ harness.unload_query_samples,
+ )
+ system_under_test = mlperf_loadgen.ConstructSUT(
+ harness.issue_query, harness.flush_queries
+ )
+
+ print('=====================================================================')
+ logger.info("Test Started")
+
+ mlperf_loadgen.StartTestWithLogSettings(
+            system_under_test, query_sample_library, settings, log_settings
+ )
+
+ logger.info("Test Finished")
+ print('=====================================================================')
+
+ # Parse output file
+ output_summary = {}
+ output_summary_path = os.path.join(
+ output_path, "mlperf_log_summary.txt")
+ with open(output_summary_path, "r") as output_summary_file:
+ for line in output_summary_file:
+ m = re.match(
+ r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.]+).*", line)
+ if m:
+ output_summary[m.group(1).strip()] = m.group(2).strip()
+        logger.info(
+            "Observed QPS: " +
+            output_summary.get("Samples per second", "n/a"))
+        logger.info("Result: " + output_summary.get("Result is", "n/a"))
+
+ mlperf_loadgen.DestroySUT(system_under_test)
+        mlperf_loadgen.DestroyQSL(query_sample_library)
+ logger.info("Test Completed")
+ print('=====================================================================')
+
+
+if __name__ == "__main__":
+ print('')
+
+ logging.basicConfig(
+ level=logging.DEBUG,
+ format="%(asctime)s %(levelname)s %(threadName)s - %(name)s %(funcName)s: %(message)s",
+ )
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "model_path", help="path to input model", default="models/yolov5s.onnx"
+ )
+ parser.add_argument(
+ "-b",
+ "--backend",
+ help="backend",
+ default="onnxruntime")
+ parser.add_argument("-o", "--output", help="path to store loadgen results")
+ parser.add_argument(
+ "-r",
+ "--runner",
+ help="model runner",
+ choices=[
+ "inline",
+ "threadpool",
+ "threadpool+replication",
+ "processpool",
+ "processpool+mp",
+ ],
+ default="inline",
+ )
+ parser.add_argument(
+ "--concurrency",
+ help="concurrency count for runner",
+ default=psutil.cpu_count(False),
+ type=int,
+ )
+ parser.add_argument(
+ "--ep", help="Execution Provider", default="CPUExecutionProvider"
+ )
+ parser.add_argument(
+ "--intraop",
+ help="IntraOp threads",
+ default=0,
+ type=int)
+ parser.add_argument(
+ "--interop",
+ help="InterOp threads",
+ default=0,
+ type=int)
+ parser.add_argument(
+ "--execmode",
+ help="Execution Mode",
+ choices=["sequential", "parallel"],
+ default="sequential",
+ )
+ parser.add_argument(
+ "--samples",
+ help="number of samples",
+ default=100,
+ type=int,
+ )
+ parser.add_argument(
+ "--loadgen_expected_qps",
+ help="Expected QPS",
+ default=1,
+ type=float)
+ parser.add_argument(
+ "--loadgen_duration_sec",
+ help="Expected duration in sec.",
+ default=1,
+ type=float)
+ parser.add_argument(
+ "--model_code",
+ help="(for PyTorch models) path to model code with cmc.py",
+ default="")
+ parser.add_argument(
+ "--model_cfg",
+ help="(for PyTorch models) path to model's configuration in JSON file",
+ default="")
+ parser.add_argument(
+ "--model_sample_pickle",
+ help="(for PyTorch models) path to a model sample in pickle format",
+ default="")
+
+ args = parser.parse_args()
+ main(
+ args.backend,
+ args.model_path,
+ args.model_code,
+ args.model_cfg,
+ args.model_sample_pickle,
+ args.output,
+ args.runner,
+ args.concurrency,
+ args.ep,
+ args.execmode,
+ args.intraop,
+ args.interop,
+ args.samples,
+ args.loadgen_expected_qps,
+ args.loadgen_duration_sec
+ )
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py
new file mode 100644
index 0000000000..f7b0bfd7da
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py
@@ -0,0 +1,18 @@
+# Developer: Grigori Fursin
+
+import os
+import psutil
+
+
+def print_host_memory_use(text=''):
+
+ pid = os.getpid()
+ python_process = psutil.Process(pid)
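+    # memory_info()[0] is the resident set size (RSS) in bytes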
+ memoryUse = python_process.memory_info()[0]
+
+ if text == '':
+ text = 'host memory use'
+
+ print('{}: {} MB'.format(text, int(memoryUse / 1000000)))
+
+ return
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
new file mode 100644
index 0000000000..c7154832fb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
@@ -0,0 +1,7 @@
+rem set CM_CACHE=--no-cache
+
+set CM_DOCKER_ORG=modularcm
+set CM_DOCKER_NAME=loadgen-generic-python
+set CM_OS_NAME=ubuntu
+set CM_HW_TARGET=cpu
+set CM_OS_VERSION=22.04
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
new file mode 100644
index 0000000000..5f49d3be9b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
@@ -0,0 +1,10 @@
+#! /bin/bash
+
+#export CM_CACHE="--no-cache"
+
+export CM_DOCKER_ORG=modularcm
+export CM_DOCKER_NAME="loadgen-generic-python"
+export CM_OS_NAME="ubuntu"
+export CM_HW_TARGET="cpu"
+export CM_OS_VERSION="22.04"
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
new file mode 100644
index 0000000000..f51ea46b64
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
@@ -0,0 +1,16 @@
+call _common.bat
+
+docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^
+ -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% ^
+ --build-arg cm_os_name=%CM_OS_NAME% ^
+ --build-arg cm_hw_target=%CM_HW_TARGET% ^
+ --build-arg cm_os_version=%CM_OS_VERSION% ^
+ --build-arg cm_version="" ^
+ --build-arg cm_automation_repo="ctuning@mlcommons-ck" ^
+    --build-arg cm_automation_repo_checkout="" ^
+ --build-arg cm_python_version="3.10.8" ^
+ --build-arg cm_mlperf_inference_loadgen_version="" ^
+ --build-arg cm_mlperf_inference_src_tags="" ^
+ --build-arg cm_mlperf_inference_src_version="" ^
+ --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" ^
+ %CM_CACHE% .
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
new file mode 100644
index 0000000000..186a0eae94
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
@@ -0,0 +1,18 @@
+#! /bin/bash
+
+. ./_common.sh
+
+time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \
+ -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION} \
+ --build-arg cm_os_name=${CM_OS_NAME} \
+ --build-arg cm_hw_target=${CM_HW_TARGET} \
+ --build-arg cm_os_version=${CM_OS_VERSION} \
+ --build-arg cm_version="" \
+ --build-arg cm_automation_repo="ctuning@mlcommons-ck" \
+    --build-arg cm_automation_repo_checkout="" \
+ --build-arg cm_python_version="3.10.8" \
+ --build-arg cm_mlperf_inference_loadgen_version="" \
+ --build-arg cm_mlperf_inference_src_tags="" \
+ --build-arg cm_mlperf_inference_src_version="" \
+ --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" \
+ ${CM_CACHE} .
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
new file mode 100644
index 0000000000..c82296c664
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
@@ -0,0 +1,96 @@
+# Modular MLPerf container with the MLCommons CM automation meta-framework
+
+# Preparing OS
+ARG cm_os_name="ubuntu"
+ARG cm_os_version="22.04"
+
+FROM ${cm_os_name}:${cm_os_version}
+
+# Maintained by the MLCommons taskforce on automation and reproducibility and OctoML
+LABEL github="https://github.com/mlcommons/ck"
+LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"
+
+# Customization
+ARG CM_GH_TOKEN
+
+# Prepare shell and entry point
+SHELL ["/bin/bash", "-c"]
+ENTRYPOINT ["/bin/bash", "-c"]
+
+# Install system dependencies
+# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
+RUN apt-get update -y
+RUN apt-get install -y lsb-release
+RUN apt-get install -y python3 python3-pip git wget sudo
+
+# Extra python deps
+RUN python3 -m pip install requests
+
+# CM version
+ARG cm_version=""
+ENV CM_VERSION="${cm_version}"
+RUN if [ "${CM_VERSION}" != "" ] ; then \
+ python3 -m pip install cmind==${CM_VERSION} ; \
+ else \
+ python3 -m pip install cmind ; \
+ fi
+
+# Setup docker environment
+ENV TZ=US/Pacific
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
+
+# Setup docker user
+# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU
+RUN groupadd --gid 10001 cm
+RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser
+RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+USER cmuser:cm
+WORKDIR /home/cmuser
+
+# Check CM installation
+RUN lsb_release -a > sys-version-os.log
+RUN uname -a > sys-version-kernel.log
+RUN python3 --version > sys-version-python3.log
+RUN cm version > sys-version-cm.log
+
+################################################################################
+# Get CM automation repository
+ARG cm_automation_repo="mlcommons@ck"
+ARG cm_automation_repo_checkout=""
+ENV CM_AUTOMATION_REPO=${cm_automation_repo}
+ENV CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_repo_checkout}
+RUN echo ${CM_AUTOMATION_REPO}
+RUN cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT}
+
+################################################################################
+# Install CM system dependencies
+RUN cm run script "get sys-utils-cm" --quiet
+
+# Detect/install python
+ARG cm_python_version=""
+RUN cm run script "get python3" --version=${cm_python_version}
+
+################################################################################
+# Build MLPerf loadgen
+ARG cm_mlperf_inference_loadgen_version=""
+RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v
+
+################################################################################
+# Install ONNX runtime
+ARG CM_ONNXRUNTIME_VERSION=""
+RUN cm run script "get generic-python-lib _onnxruntime" --version=${CM_ONNXRUNTIME_VERSION}
+
+ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime"
+ARG CM_MLPERF_CHOICE_DEVICE="cpu"
+
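+# resolve dependencies and warm the CM cache for the loadgen app without
+# actually executing it (--fake_run)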
+RUN cm run script --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 \
+ --adr.compiler.tags=gcc \
+ --adr.python.version_min=3.8 \
+ --quiet \
+ --fake_run
+
+################################################################################
+# CMD entry point
+CMD /bin/bash
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
new file mode 100644
index 0000000000..195acdec6a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
@@ -0,0 +1,33 @@
+FROM ubuntu:20.04
+SHELL ["/bin/bash", "-c"]
+ARG CM_GH_TOKEN
+
+# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes
+# Install system dependencies
+RUN apt-get update -y
+RUN apt-get install -y python3 python3-pip git sudo wget
+
+# Install python packages
+RUN python3 -m pip install cmind requests
+
+# Setup docker environment
+ENTRYPOINT ["/bin/bash", "-c"]
+ENV TZ=US/Pacific
+ENV PATH=${PATH}:$HOME/.local/bin
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone
+
+# Setup docker user
+RUN groupadd cm
+RUN useradd -g cm --create-home --shell /bin/bash cmuser
+RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+USER cmuser:cm
+WORKDIR /home/cmuser
+
+# Download CM repo for scripts
+RUN cm pull repo ctuning@mlcommons-ck
+
+# Install all system dependencies
+RUN cm run script --quiet --tags=get,sys-utils-cm
+
+# Run commands
+RUN cm run script --quiet --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 --fake_run
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
new file mode 100644
index 0000000000..171aeecab9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
@@ -0,0 +1,3 @@
+call _common.bat
+
+docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION%
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
new file mode 100644
index 0000000000..c82d4b7b12
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
@@ -0,0 +1,3 @@
+. ./_common.sh
+
+docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml
new file mode 100644
index 0000000000..305578a17a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml
@@ -0,0 +1,341 @@
+# Identification of this CM script
+alias: app-mlperf-inference-amd
+uid: 467cdb20aabc4394
+cache: false
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular MLPerf benchmarks"
+
+
+# User-friendly tags to find this CM script
+tags:
+ - reproduce
+ - mlcommons
+ - mlperf
+ - inference
+ - harness
+ - amd-harness
+ - amd
+
+# Default environment
+default_env:
+ CM_MLPERF_LOADGEN_SCENARIO: Offline
+ CM_MLPERF_LOADGEN_MODE: performance
+ CM_SKIP_PREPROCESS_DATASET: 'no'
+ CM_SKIP_MODEL_DOWNLOAD: 'no'
+ CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness
+ CM_MLPERF_SKIP_RUN: 'no'
+
+env:
+ CM_CALL_MLPERF_RUNNER: 'no'
+
+# Map script inputs to environment variables
+input_mapping:
+ count: CM_MLPERF_LOADGEN_QUERY_COUNT
+ max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
+ mlperf_conf: CM_MLPERF_CONF
+ mode: CM_MLPERF_LOADGEN_MODE
+ output_dir: CM_MLPERF_OUTPUT_DIR
+ scenario: CM_MLPERF_LOADGEN_SCENARIO
+ user_conf: CM_MLPERF_USER_CONF
+ skip_preprocess: CM_SKIP_PREPROCESS_DATASET
+ skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
+ target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
+ offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+ server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
+ target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
+ singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+ multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+ performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ rerun: CM_RERUN
+ results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+
+new_state_keys:
+ - mlperf-inference-implementation
+ - CM_SUT_*
+
+# Env keys which are exposed to higher level scripts
+new_env_keys:
+ - CM_MLPERF_*
+ - CM_DATASET_*
+ - CM_HW_NAME
+ - CM_ML_MODEL_*
+ - CM_MAX_EXAMPLES
+ - CM_IMAGENET_ACCURACY_DTYPE
+ - CM_SQUAD_ACCURACY_DTYPE
+
+
+# Dependencies on other CM scripts
+
+deps:
+
+ # Detect host OS features
+ - tags: detect,os
+
+ # Detect host CPU features
+ - tags: detect,cpu
+
+ # Install system dependencies on a given host
+ - tags: get,sys-utils-cm
+
+
+ ########################################################################
+ # Install MLPerf inference dependencies
+
+ # Download MLPerf inference source
+ - tags: get,mlcommons,inference,src
+ names:
+ - inference-src
+
+ # Download MLPerf inference loadgen
+ - tags: get,mlcommons,inference,loadgen
+ names:
+ - inference-loadgen
+
+ # Creates user conf for given SUT
+ - tags: generate,user-conf,mlperf,inference
+ names:
+ - user-conf-generator
+
+ # Get MLPerf logging library
+ - tags: get,generic-python-lib,_mlperf_logging
+ names:
+ - mlperf-logging
+
+ - tags: get,git,repo
+ names:
+ - inference-results
+ - inference-code
+ update_tags_from_env_with_prefix:
+ _repo.:
+ - CM_MLPERF_INFERENCE_RESULTS_REPO
+ env:
+ CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+ extra_cache_tags: results,repo,mlperf
+
+# Post dependencies to run this app including for power measurement
+post_deps:
+
+ - names:
+ - runner
+ - mlperf-runner
+ skip_if_env:
+ CM_MLPERF_SKIP_RUN:
+ - 'yes'
+ - yes
+ tags: benchmark-mlperf
+
+ - tags: save,mlperf,inference,state
+ names:
+ - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+ # Target devices
+ cpu:
+ group: device
+ default: true
+ env:
+ CM_MLPERF_DEVICE: cpu
+ cuda:
+ group: device
+ env:
+ CM_MLPERF_DEVICE: gpu
+ CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+
+ rocm:
+ group: device
+ env:
+ CM_MLPERF_DEVICE: rocm
+ CM_MLPERF_DEVICE_LIB_NAMESPEC: rocm
+
+ openshift:
+ group: backend
+ default: true
+ env:
+ CM_MLPERF_BACKEND: openshift
+
+ pytorch:
+ group: backend
+ env:
+ CM_MLPERF_BACKEND: pytorch
+
+ pytorch,cuda:
+ deps:
+ - tags: get,generic-python-lib,_torch_cuda
+
+ pytorch,rocm:
+ deps:
+ - tags: get,generic-python-lib,_torch,_rocm
+
+ pytorch,cpu:
+ deps:
+ - tags: get,generic-python-lib,_torch
+
+ bs.#:
+ group: batch-size
+
+
+ # Reference MLPerf models
+ resnet50:
+ group: model
+ default: true
+ env:
+ CM_MODEL: resnet50
+
+ retinanet:
+ group: model
+ base:
+ - bs.1
+ env:
+ CM_MODEL: retinanet
+
+ bert_:
+ {}
+
+ bert-99:
+ group: model
+ base:
+ - bert_
+ env:
+ CM_MODEL: bert-99
+ CM_SQUAD_ACCURACY_DTYPE: float32
+
+ bert-99.9:
+ group: model
+ base:
+ - bert_
+ env:
+ CM_MODEL: bert-99.9
+
+ gptj_:
+ deps:
+ - tags: get,ml-model,gptj
+ names:
+ - gptj-model
+ - tags: get,dataset,cnndm,_validation
+
+ gptj-99:
+ group: model
+ base:
+ - gptj_
+ env:
+ CM_MODEL: gptj-99
+ CM_SQUAD_ACCURACY_DTYPE: float32
+
+ gptj-99.9:
+ group: model
+ base:
+ - gptj_
+ env:
+ CM_MODEL: gptj-99.9
+
+ llama2-70b_:
+ deps:
+ - tags: get,generic-python-lib,_package.compressed_tensors
+ names:
+ - compressed_tensors
+ - tags: get,preprocessed,dataset,openorca,_mlc,_validation
+ - tags: get,ml-model,llama2,_amd,_pytorch
+ skip_if_env:
+ CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+ - 'yes'
+ CM_RUN_STATE_DOCKER:
+ - 'yes'
+ - tags: download,file,_url.https://github.com/vllm-project/vllm/blob/38c4b7e863570a045308af814c72f4504297222e/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json
+ extra_cache_tags: llama2-scales,kv-cache
+ force_cache: true
+ env:
+ CM_DOWNLOAD_FINAL_ENV_NAME: QUANTIZATION_PARAM_PATH
+ - tags: get,generic-python-lib,_package.vllm
+ names:
+ - vllm
+ - tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only
+ extra_cache_tags: inference,results
+ env:
+ CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH
+
+ llama2-70b-99:
+ group: model
+ base:
+ - llama2-70b_
+ env:
+ CM_MODEL: llama2-70b-99
+
+ llama2-70b-99.9:
+ group: model
+ base:
+ - llama2-70b_
+ env:
+ CM_MODEL: llama2-70b-99.9
+
+ singlestream:
+ group: loadgen-scenario
+ env:
+ CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+
+ singlestream,resnet50:
+ default_variations:
+ batch-size: bs.1
+
+ singlestream,retinanet:
+ default_variations:
+ batch-size: bs.1
+
+ multistream:
+ group: loadgen-scenario
+ env:
+ CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+
+ offline:
+ group: loadgen-scenario
+ env:
+ CM_MLPERF_LOADGEN_SCENARIO: Offline
+
+ server:
+ group: loadgen-scenario
+ env:
+ CM_MLPERF_LOADGEN_SCENARIO: Server
+
+ uint8:
+ group: precision
+ fp16:
+ group: precision
+ fp32:
+ group: precision
+
+ r4.1-dev_default:
+ group: version
+ default: true
+ env:
+ CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0
+
+ r4.1_default:
+ group: version
+ env:
+ CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.1
+
+docker:
+ real_run: False
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py
new file mode 100644
index 0000000000..7c6b91e580
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py
@@ -0,0 +1,59 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ if os_info['platform'] == 'windows':
+ return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+ env = i['env']
+
+ if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ return {'return': 0}
+
+ env['CM_MLPERF_AMD_SCRIPT_PATH'] = env['CM_TMP_CURRENT_SCRIPT_PATH']
+ env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join(
+ env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD")
+
+ if 'CM_MODEL' not in env:
+ return {
+ 'return': 1, 'error': 'Please select a variation specifying the model to run'}
+ if 'CM_MLPERF_BACKEND' not in env:
+ return {'return': 1,
+ 'error': 'Please select a variation specifying the backend'}
+ if 'CM_MLPERF_DEVICE' not in env:
+ return {
+ 'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+ if "llama2" in env['CM_MODEL']:
+ env['CM_RUN_DIR'] = i['run_script_input']['path']
+ env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join(
+ env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8")
+ env['CM_RUN_CMD'] = "bash run-llama2.sh "
+ else:
+ return {'return': 1, 'error': 'Model {} not supported'.format(
+ env['CM_MODEL'])}
+
+ return {'return': 0}
+ # return {'return':1, 'error': 'Run command needs to be tested'}
+
+
+def postprocess(i):
+
+ env = i['env']
+
+ return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh
new file mode 100644
index 0000000000..10f36f8ca2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+set -xeu
+
+N_SAMPLES=${N_SAMPLES:-24576} #24576 #3072 #2457 #6
+TP=1
+DP=${DP:-8}
+
+export HIP_FORCE_DEV_KERNARG=1
+export VLLM_USE_TRITON_FLASH_ATTN=0
+export VLLM_FP8_PADDING=1
+export VLLM_FP8_ACT_PADDING=1
+export VLLM_FP8_WEIGHT_PADDING=1
+export VLLM_FP8_REDUCE_CONV=1
+
+export HARNESS_DISABLE_VLLM_LOGS=1
+export VLLM_LOGGING_LEVEL=ERROR
+
+MODEL_PATH=${LLAMA2_CHECKPOINT_PATH:-/data/llm/llama2-70b-chat/}
+DATASET_PATH=${CM_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz}
+QUANTIZED_WEIGHTS_PATH=${CM_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors}
+QUANTIZATION_PARAM_PATH=${QUANTIZATION_PARAM_PATH:-/app/kv_cache_scales.json}
+
+MLPERF_CONF="${CM_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}"
+USER_CONF="${CM_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}"
+
+SUBMISSION=${SUBMISSION:-0}
+
+LOG_DIR=${CM_MLPERF_OUTPUT_DIR}
+
+cp $USER_CONF ${LOG_DIR}/user.conf
+
+COMMON_CMD_OPTIONS="\
+ --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+ --output-log-dir ${LOG_DIR} \
+ --model-path $MODEL_PATH \
+ --mlperf-conf $MLPERF_CONF \
+ --user-conf $USER_CONF \
+ --total-sample-count $N_SAMPLES \
+ --dataset-path $DATASET_PATH \
+ --dtype float16 \
+ --backend vllm \
+ --device cuda:0 \
+ --kv-cache-dtype fp8 \
+ -tp ${TP} \
+ -dp ${DP} \
+ --quantization fp8 \
+ --quantized-weights-path ${QUANTIZED_WEIGHTS_PATH} \
+ --quantization-param-path ${QUANTIZATION_PARAM_PATH}"
+
+if [ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]; then
+ COMMON_CMD_OPTIONS+=" --accuracy"
+fi
+
+if [ "${CM_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then
+ WD=${WD:-0}
+ SORTING=${SORTING:-descending} #ascending #descending #lexicographic #skip
+ export VLLM_SCHED_PREFILL_KVC_FREEPCT=31.0
+ # generate run command
+ cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \
+ ${COMMON_CMD_OPTIONS} \
+ --warmup-duration ${WD} \
+ --sorting ${SORTING} \
+ --enforce-eager True \
+ --gpu-memory-utilization 0.99"
+else
+ # generate run command
+ cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_SyncServer.py \
+ ${COMMON_CMD_OPTIONS} \
+ --enable-warm-up \
+ --enable-batcher"
+fi
+
+echo "${cmd}"
+# uncomment the line below for testing
+#eval "${cmd}"
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh
new file mode 100644
index 0000000000..ddcd0b5504
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then
+ cd ${CM_RUN_DIR}
+ cmd=${CM_RUN_CMD}
+ echo "${cmd}"
+ eval "${cmd}"
+  test $? -eq 0 || exit 1
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md
new file mode 100644
index 0000000000..696f829223
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md
new file mode 100644
index 0000000000..f9f7ce6c6e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml
new file mode 100644
index 0000000000..e66ae2bacd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml
@@ -0,0 +1,282 @@
+alias: app-mlperf-inference-ctuning-cpp-tflite
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Modular MLPerf inference benchmark pipeline
+default_env:
+ CM_DATASET_COMPRESSED: 'off'
+ CM_DATASET_INPUT_SQUARE_SIDE: '224'
+ CM_FAST_COMPILATION: 'yes'
+ CM_LOADGEN_BUFFER_SIZE: '1024'
+ CM_MLPERF_LOADGEN_MODE: accuracy
+ CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: '0'
+ CM_MLPERF_OUTPUT_DIR: .
+ CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_cpp
+ CM_MLPERF_TFLITE_USE_NEON: '0'
+ CM_MLPERF_TFLITE_USE_OPENCL: '0'
+ CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94
+ CM_ML_MODEL_NORMALIZE_DATA: '0'
+ CM_ML_MODEL_SUBTRACT_MEANS: '1'
+ CM_VERBOSE: '0'
+deps:
+- tags: detect,os
+- tags: detect,cpu
+- tags: get,sys-utils-cm
+- enable_if_env:
+ CM_MLPERF_DEVICE:
+ - gpu
+ tags: get,cuda
+- names:
+ - loadgen
+ tags: get,loadgen
+- names:
+ - inference-src
+ tags: get,mlcommons,inference,src
+- enable_if_env:
+ CM_MLPERF_BACKEND:
+ - tflite
+ - armnn_tflite
+ CM_MODEL:
+ - mobilenet
+ names:
+ - ml-model
+ - tflite-model
+ - mobilenet-model
+ tags: get,ml-model,mobilenet,raw,_tflite
+- enable_if_env:
+ CM_MLPERF_BACKEND:
+ - tflite
+ - armnn_tflite
+ CM_MODEL:
+ - resnet50
+ names:
+ - ml-model
+ - tflite-model
+ - resnet50-model
+ tags: get,ml-model,resnet50,raw,_tflite,_no-argmax
+- enable_if_env:
+ CM_MLPERF_BACKEND:
+ - tf
+ CM_MODEL:
+ - resnet50
+ names:
+ - ml-model
+ - tflite-model
+ - resnet50-model
+ tags: get,ml-model,resnet50,raw,_tf
+- enable_if_env:
+ CM_MLPERF_BACKEND:
+ - tflite
+ - armnn_tflite
+ CM_MODEL:
+ - efficientnet
+ names:
+ - ml-model
+ - tflite-model
+ - efficientnet-model
+ tags: get,ml-model,efficientnet,raw,_tflite
+- names:
+ - tensorflow
+ - tflite
+ tags: get,tensorflow,lib,_tflite
+- enable_if_env:
+ CM_MLPERF_TFLITE_USE_ARMNN:
+ - 'yes'
+ names:
+ - armnn
+ - lib-armnn
+ tags: get,lib,armnn
+input_mapping:
+ compressed_dataset: CM_DATASET_COMPRESSED
+ count: CM_MLPERF_LOADGEN_QUERY_COUNT
+ mlperf_conf: CM_MLPERF_CONF
+ mode: CM_MLPERF_LOADGEN_MODE
+ output_dir: CM_MLPERF_OUTPUT_DIR
+ performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+ scenario: CM_MLPERF_LOADGEN_SCENARIO
+ user_conf: CM_MLPERF_USER_CONF
+ verbose: CM_VERBOSE
+new_env_keys:
+- CM_MLPERF_*
+- CM_ML_MODEL_*
+- CM_HW_NAME
+new_state_keys:
+- CM_SUT_*
+post_deps:
+- names:
+ - compiler-program
+ skip_if_env:
+ CM_MLPERF_SKIP_RUN:
+ - 'yes'
+ tags: compile,program
+- names:
+ - mlperf-runner
+ skip_if_env:
+ CM_MLPERF_SKIP_RUN:
+ - 'yes'
+ tags: benchmark-mlperf
+- names:
+ - save-mlperf-inference-state
+ tags: save,mlperf,inference,state
+prehook_deps:
+- names:
+ - user-conf-generator
+ tags: generate,user-conf,mlperf,inference
+- enable_if_env:
+ CM_MLPERF_SKIP_RUN:
+ - 'no'
+ CM_MODEL:
+ - resnet50
+ names:
+ - imagenet-preprocessed
+ - preprocessed-dataset
+ skip_if_env:
+ CM_DATASET_COMPRESSED:
+ - 'on'
+ tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb32,_NHWC
+ update_tags_from_env:
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+- enable_if_env:
+ CM_MLPERF_SKIP_RUN:
+ - 'no'
+ CM_MODEL:
+ - mobilenet
+ - efficientnet
+ names:
+ - imagenet-preprocessed
+ - preprocessed-dataset
+ skip_if_env:
+ CM_DATASET_COMPRESSED:
+ - 'on'
+ tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb32,_NHWC
+ update_tags_from_env:
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+- enable_if_env:
+ CM_DATASET_COMPRESSED:
+ - 'on'
+ CM_MLPERF_SKIP_RUN:
+ - 'no'
+ CM_MODEL:
+ - mobilenet
+ - efficientnet
+ names:
+ - imagenet-preprocessed
+ - preprocessed-dataset
+ tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb8,_NHWC
+ update_tags_from_env:
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+- enable_if_env:
+ CM_DATASET_COMPRESSED:
+ - 'on'
+ CM_MLPERF_SKIP_RUN:
+ - 'no'
+ CM_MODEL:
+ - resnet50
+ names:
+ - imagenet-preprocessed
+ - preprocessed-dataset
+ tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb8,_NHWC
+ update_tags_from_env:
+ - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS
+tags:
+- app
+- mlcommons
+- mlperf
+- inference
+- tflite-cpp
+tags_help: app mlperf inference tflite-cpp
+uid: 415904407cca404a
+variations:
+ armnn:
+ default_variations:
+ optimization-target: use-neon
+ env:
+ CM_MLPERF_TFLITE_USE_ARMNN: 'yes'
+ CM_TMP_LINK_LIBS: tensorflowlite,armnn
+ armnn,tflite:
+ env:
+ CM_MLPERF_BACKEND: armnn_tflite
+      CM_MLPERF_BACKEND_VERSION: <<<CM_LIB_ARMNN_VERSION>>>
+ CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_armnn_cpp
+ CM_TMP_LINK_LIBS: tensorflowlite,armnn,armnnTfLiteParser
+ CM_TMP_SRC_FOLDER: armnn
+ cpu:
+ default: true
+ env:
+ CM_MLPERF_DEVICE: cpu
+ group: device
+ efficientnet:
+ env:
+ CM_MODEL: efficientnet
+ group: model
+ fp32:
+ adr:
+ ml-model:
+ tags: _fp32
+ preprocessed-dataset:
+ tags: _float32
+ default: true
+ env:
+ CM_MLPERF_MODEL_PRECISION: float32
+ group: precision
+ gpu:
+ env:
+ CM_MLPERF_DEVICE: gpu
+ CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+ group: device
+ int8:
+ adr:
+ ml-model:
+ tags: _int8
+ preprocessed-dataset:
+ tags: _int8
+ env:
+ CM_DATASET_COMPRESSED: 'on'
+ CM_MLPERF_MODEL_PRECISION: int8
+ group: precision
+ mobilenet:
+ env:
+ CM_MODEL: mobilenet
+ group: model
+ resnet50:
+ default: true
+ env:
+ CM_MODEL: resnet50
+ group: model
+ singlestream:
+ default: true
+ env:
+ CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+ group: loadgen-scenario
+ tf:
+ env:
+ CM_MLPERF_BACKEND: tf
+ group: backend
+ tflite:
+ default: true
+ env:
+ CM_MLPERF_BACKEND: tflite
+ CM_MLPERF_BACKEND_VERSION: master
+ CM_TMP_LINK_LIBS: tensorflowlite
+ CM_TMP_SRC_FOLDER: src
+ group: backend
+ uint8:
+ adr:
+ ml-model:
+ tags: _uint8
+ preprocessed-dataset:
+ tags: _int8
+ env:
+ CM_DATASET_COMPRESSED: 'on'
+ CM_MLPERF_MODEL_PRECISION: uint8
+ group: precision
+ use-neon:
+ env:
+ CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_neon
+ CM_MLPERF_TFLITE_USE_NEON: '1'
+ group: optimization-target
+ use-opencl:
+ env:
+ CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_opencl
+ CM_MLPERF_TFLITE_USE_OPENCL: '1'
+ group: optimization-target
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
new file mode 100644
index 0000000000..c641e9d1e7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "armnn/ArmNN.hpp"
+#include "armnn/Exceptions.hpp"
+#include "armnn/Tensor.hpp"
+#include "armnn/INetwork.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+#include "loadgen.h"
+#include "query_sample_library.h"
+#include "system_under_test.h"
+#include "test_settings.h"
+
+
+#include "benchmark.h"
+
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+
+using namespace std;
+using namespace CK;
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+class ArmNNBenchmark : public Benchmark<TData, TInConverter, TOutConverter> {
+public:
+    ArmNNBenchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr)
+        : Benchmark<TData, TInConverter, TOutConverter>(settings, in_ptr, out_ptr) {
+    }
+};
+
+armnn::InputTensors MakeInputTensors(const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& input, const void* inputTensorData)
+{
+ return { {input.first, armnn::ConstTensor(input.second, inputTensorData) } };
+}
+
+armnn::OutputTensors MakeOutputTensors(const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& output, void* outputTensorData)
+{
+ return { {output.first, armnn::Tensor(output.second, outputTensorData) } };
+}
+
+class Program {
+public:
+ Program () : runtime( armnn::IRuntime::Create(options) ) {
+
+ bool use_neon = getenv_b("CM_MLPERF_TFLITE_USE_NEON");
+ bool use_opencl = getenv_b("CM_MLPERF_TFLITE_USE_OPENCL");
+ string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME");
+ string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME");
+
+ settings = new BenchmarkSettings(MODEL_TYPE::LITE);
+
+ session = new BenchmarkSession(settings);
+
+ armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
+
+ // Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc
+    //std::vector<armnn::BackendId> optOptions = {armnn::Compute::CpuAcc, armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> optOptions = {armnn::Compute::CpuRef};
+ if( use_neon && use_opencl) {
+ optOptions = {armnn::Compute::CpuAcc, armnn::Compute::GpuAcc};
+ } else if( use_neon ) {
+ optOptions = {armnn::Compute::CpuAcc};
+ } else if( use_opencl ) {
+ optOptions = {armnn::Compute::GpuAcc};
+ }
+
+ cout << "\nLoading graph..." << endl;
+
+ armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(settings->graph_file().c_str());
+ if (!network)
+ throw "Failed to load graph from file";
+
+ armnnTfLiteParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo(0, input_layer_name);
+ armnnTfLiteParser::BindingPointInfo outputBindingInfo = parser->GetNetworkOutputBindingInfo(0, output_layer_name);
+
+ armnn::TensorShape inShape = inputBindingInfo.second.GetShape();
+ armnn::TensorShape outShape = outputBindingInfo.second.GetShape();
+ std::size_t inSize = inShape[0] * inShape[1] * inShape[2] * inShape[3];
+ std::size_t outSize = outShape[0] * outShape[1];
+
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network, optOptions, runtime->GetDeviceSpec());
+
+ runtime->LoadNetwork(networkIdentifier, std::move(optNet));
+
+ armnn::DataType input_type = inputBindingInfo.second.GetDataType();
+ armnn::DataType output_type = outputBindingInfo.second.GetDataType();
+ if (input_type != output_type)
+ throw format("Type of graph's input (%d) does not match type of its output (%d).", int(input_type), int(output_type));
+
+ void* input = input_type == armnn::DataType::Float32 ? (void*)new float[inSize] : (void*)new uint8_t[inSize];
+ void* output = output_type == armnn::DataType::Float32 ? (void*)new float[outSize] : (void*)new uint8_t[outSize];
+
+ inputTensor = MakeInputTensors(inputBindingInfo, input);
+ outputTensor = MakeOutputTensors(outputBindingInfo, output);
+
+ switch (input_type) {
+ case armnn::DataType::Float32:
+ if (settings->skip_internal_preprocessing) {
+ cout << "************* Type 1" << endl;
+ benchmark.reset(new ArmNNBenchmark<float, InCopy, OutCopy>(settings, (float*)input, (float*)output));
+ } else {
+ cout << "************* Type 2" << endl;
+ benchmark.reset(new ArmNNBenchmark<float, InNormalize, OutCopy>(settings, (float*)input, (float*)output));
+ }
+ break;
+
+ case armnn::DataType::QAsymmU8:
+ benchmark.reset(new ArmNNBenchmark<uint8_t, InCopy, OutDequantize>(settings, (uint8_t*)input, (uint8_t*)output));
+ break;
+
+ default:
+ throw format("Unsupported type of graph's input: %d. "
+ "Supported types are: Float32 (%d), UInt8 (%d)",
+ int(input_type), int(armnn::DataType::Float32), int(armnn::DataType::QAsymmU8));
+ }
+
+ int out_num = outShape[0];
+ int out_classes = outShape[1];
+ cout << format("Output tensor dimensions: %d*%d", out_num, out_classes) << endl;
+ if (out_classes != settings->num_classes && out_classes != settings->num_classes+1)
+ throw format("Unsupported number of classes in graph's output tensor. Supported numbers are %d and %d",
+ settings->num_classes, settings->num_classes+1);
+ benchmark->has_background_class = out_classes == settings->num_classes+1;
+ }
+
+ ~Program() {
+ }
+
+ //bool is_available_batch() {return session? session->get_next_batch(): false; }
+
+ void LoadNextBatch(const std::vector<mlperf::QuerySampleIndex>& img_indices) {
+ auto vl = settings->verbosity_level;
+
+ if( vl > 1 ) {
+ cout << "LoadNextBatch([";
+ for( auto idx : img_indices) {
+ cout << idx << ' ';
+ }
+ cout << "])" << endl;
+ } else if( vl ) {
+ cout << 'B' << flush;
+ }
+ session->load_filenames(img_indices);
+ benchmark->load_images( session );
+
+ if( vl ) {
+ cout << endl;
+ }
+ }
+
+ void ColdRun() {
+ auto vl = settings->verbosity_level;
+
+ if( vl > 1 ) {
+ cout << "Triggering a Cold Run..." << endl;
+ } else if( vl ) {
+ cout << 'C' << flush;
+ }
+
+ if (runtime->EnqueueWorkload(networkIdentifier, inputTensor, outputTensor) != armnn::Status::Success)
+ throw "Failed to invoke the classifier";
+ }
+
+ int InferenceOnce(int img_idx) {
+ benchmark->get_random_image( img_idx );
+
+ if (runtime->EnqueueWorkload(networkIdentifier, inputTensor, outputTensor) != armnn::Status::Success)
+ throw "Failed to invoke the classifier";
+
+ return benchmark->get_next_result();
+ }
+
+ void UnloadBatch(const std::vector<mlperf::QuerySampleIndex>& img_indices) {
+ auto b_size = img_indices.size();
+
+ auto vl = settings->verbosity_level;
+
+ if( vl > 1 ) {
+ cout << "Unloading a batch[" << b_size << "]" << endl;
+ } else if( vl ) {
+ cout << 'U' << flush;
+ }
+
+ benchmark->unload_images(b_size);
+ //benchmark->save_results( );
+ }
+
+ const int available_images_max() { return settings->list_of_available_imagefiles().size(); }
+ const int images_in_memory_max() { return settings->images_in_memory_max; }
+
+ BenchmarkSettings *settings;
+private:
+ BenchmarkSession *session;
+ unique_ptr<IBenchmark> benchmark;
+ armnn::NetworkId networkIdentifier;
+ armnn::OutputTensors outputTensor;
+ armnn::InputTensors inputTensor;
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime;
+};
+
+class SystemUnderTestSingleStream : public mlperf::SystemUnderTest {
+public:
+ SystemUnderTestSingleStream(Program *_prg) : mlperf::SystemUnderTest() {
+ prg = _prg;
+ query_counter = 0;
+ };
+
+ ~SystemUnderTestSingleStream() override = default;
+
+ const std::string& Name() override { return name_; }
+
+ void IssueQuery(const std::vector<mlperf::QuerySample>& samples) override {
+
+ ++query_counter;
+ auto vl = prg->settings->verbosity_level;
+ if( vl > 1 ) {
+ cout << query_counter << ") IssueQuery([" << samples.size() << "]," << samples[0].id << "," << samples[0].index << ")" << endl;
+ } else if ( vl ) {
+ cout << 'Q' << flush;
+ }
+
+ std::vector<mlperf::QuerySampleResponse> responses;
+ responses.reserve(samples.size());
+ float encoding_buffer[samples.size()];
+ int i=0;
+ for (auto s : samples) {
+ int predicted_class = prg->InferenceOnce(s.index);
+
+ if( vl > 1 ) {
+ cout << "Query image index: " << s.index << " -> Predicted class: " << predicted_class << endl << endl;
+ } else if ( vl ) {
+ cout << 'p' << flush;
+ }
+
+ /* This would be the correct way to pass in one integer index:
+ */
+// int single_value_buffer[] = { (int)predicted_class };
+
+ /* This conversion is subtly but terribly wrong
+ yet we use it here in order to use Guenther's parsing script:
+ */
+ encoding_buffer[i] = (float)predicted_class;
+ responses.push_back({s.id, uintptr_t(&encoding_buffer[i]), sizeof(encoding_buffer[i])});
+ ++i;
+ }
+ mlperf::QuerySamplesComplete(responses.data(), responses.size());
+ }
+
+ void FlushQueries() override {
+ auto vl = prg->settings->verbosity_level;
+ if ( vl ) {
+ cout << endl;
+ }
+ }
+
+ void ReportLatencyResults(const std::vector<mlperf::QuerySampleLatency>& latencies_ns) {
+
+ size_t size = latencies_ns.size();
+ uint64_t avg = accumulate(latencies_ns.begin(), latencies_ns.end(), uint64_t(0) )/size;
+
+ std::vector<mlperf::QuerySampleLatency> sorted_lat(latencies_ns.begin(), latencies_ns.end());
+ sort(sorted_lat.begin(), sorted_lat.end());
+
+ cout << endl << "------------------------------------------------------------";
+ cout << endl << "| LATENCIES (in nanoseconds and fps) |";
+ cout << endl << "------------------------------------------------------------";
+ size_t p50 = size * 0.5;
+ size_t p90 = size * 0.9;
+ cout << endl << "Number of queries run: " << size;
+ cout << endl << "Min latency: " << sorted_lat[0] << "ns (" << 1e9/sorted_lat[0] << " fps)";
+ cout << endl << "Median latency: " << sorted_lat[p50] << "ns (" << 1e9/sorted_lat[p50] << " fps)";
+ cout << endl << "Average latency: " << avg << "ns (" << 1e9/avg << " fps)";
+ cout << endl << "90 percentile latency: " << sorted_lat[p90] << "ns (" << 1e9/sorted_lat[p90] << " fps)";
+
+ if(!prg->settings->trigger_cold_run) {
+ cout << endl << "First query (cold model) latency: " << latencies_ns[0] << "ns (" << 1e9/latencies_ns[0] << " fps)";
+ }
+ cout << endl << "Max latency: " << sorted_lat[size-1] << "ns (" << 1e9/sorted_lat[size-1] << " fps)";
+ cout << endl << "------------------------------------------------------------ " << endl;
+ }
+
+private:
+ std::string name_{"TFLite_SUT"};
+ Program *prg;
+ long query_counter;
+};
+
+class QuerySampleLibrarySingleStream : public mlperf::QuerySampleLibrary {
+public:
+ QuerySampleLibrarySingleStream(Program *_prg) : mlperf::QuerySampleLibrary() {
+ prg = _prg;
+ };
+
+ ~QuerySampleLibrarySingleStream() = default;
+
+ const std::string& Name() override { return name_; }
+
+ size_t TotalSampleCount() override { return prg->available_images_max(); }
+
+ size_t PerformanceSampleCount() override { return prg->images_in_memory_max(); }
+
+ void LoadSamplesToRam(const std::vector<mlperf::QuerySampleIndex>& samples) override {
+ prg->LoadNextBatch(samples);
+ return;
+ }
+
+ void UnloadSamplesFromRam(const std::vector<mlperf::QuerySampleIndex>& samples) override {
+ prg->UnloadBatch(samples);
+ return;
+ }
+
+private:
+ std::string name_{"TFLite_QSL"};
+ Program *prg;
+};
+
+void TestSingleStream(Program *prg) {
+ SystemUnderTestSingleStream sut(prg);
+ QuerySampleLibrarySingleStream qsl(prg);
+
+ const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF");
+ const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF");
+
+ std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model");
+ std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", "");
+
+ const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO");
+ const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE");
+
+ std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl;
+ std::cout << "Path to user.conf : " << user_conf_path << std::endl;
+ std::cout << "Model Name: " << model_name << std::endl;
+ std::cout << "LoadGen Scenario: " << scenario_string << std::endl;
+ std::cout << "LoadGen Mode: " << ( mode_string != "" ? mode_string : "(empty string)" ) << std::endl;
+
+ mlperf::TestSettings ts;
+
+ // This should have been done automatically inside ts.FromConfig() !
+ ts.scenario = ( scenario_string == "SingleStream") ? mlperf::TestScenario::SingleStream
+ : ( scenario_string == "MultiStream") ? mlperf::TestScenario::MultiStream
+ : ( scenario_string == "Server") ? mlperf::TestScenario::Server
+ : ( scenario_string == "Offline") ? mlperf::TestScenario::Offline : mlperf::TestScenario::SingleStream;
+
+ if( mode_string != "")
+ ts.mode = ( mode_string == "SubmissionRun") ? mlperf::TestMode::SubmissionRun
+ : ( mode_string == "accuracy") ? mlperf::TestMode::AccuracyOnly
+ : ( mode_string == "performance") ? mlperf::TestMode::PerformanceOnly
+ : ( mode_string == "findpeakperformance") ? mlperf::TestMode::FindPeakPerformance : mlperf::TestMode::SubmissionRun;
+
+ if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) {
+ std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path << std::endl;
+ exit(1);
+ }
+
+ if (ts.FromConfig(user_conf_path, model_name, scenario_string)) {
+ std::cout << "Issue with user.conf file at " << user_conf_path << std::endl;
+ exit(1);
+ }
+
+ mlperf::LogSettings log_settings;
+ log_settings.log_output.outdir = logs_dir;
+ log_settings.log_output.prefix_with_datetime = false;
+ log_settings.enable_trace = false;
+
+
+ if (prg->settings->trigger_cold_run) {
+ prg->ColdRun();
+ }
+
+ mlperf::StartTest(&sut, &qsl, ts, log_settings);
+}
+
+int main(int argc, char* argv[]) {
+ try {
+ Program *prg = new Program();
+ TestSingleStream(prg);
+ delete prg;
+ }
+ catch (const string& error_message) {
+ cerr << "ERROR: " << error_message << endl;
+ return -1;
+ }
+ return 0;
+}
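The harness above programs against a benchmark abstraction declared in inc/benchmark.h, whose body is truncated at the end of this diff. Reconstructed from the call sites in classification.cpp, the assumed shape of that interface is roughly the following sketch; names are inferred from usage, not copied from the header:

// Sketch inferred from call sites in classification.cpp; the real declarations
// are in inc/benchmark.h. BenchmarkSession tracks the filenames LoadGen selected.
class BenchmarkSession;

class IBenchmark {
public:
    virtual ~IBenchmark() = default;
    bool has_background_class = false;                        // out_classes == num_classes + 1
    virtual void load_images(BenchmarkSession* session) = 0;  // preprocess + fill input buffers
    virtual void get_random_image(int img_idx) = 0;           // stage one image into the input tensor
    virtual int get_next_result() = 0;                        // argmax over the output tensor
    virtual void unload_images(size_t batch_size) = 0;        // drop the staged batch
};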
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
new file mode 100644
index 0000000000..ec75f7e841
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
@@ -0,0 +1,120 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ if os_info['platform'] == 'windows':
+ return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+ env = i['env']
+
+ if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+ return {'return': 0}
+
+ if 'CM_MODEL' not in env:
+ return {
+ 'return': 1, 'error': 'Please select a variation specifying the model to run'}
+ if 'CM_MLPERF_BACKEND' not in env:
+ return {'return': 1,
+ 'error': 'Please select a variation specifying the backend'}
+ if 'CM_MLPERF_DEVICE' not in env:
+ return {
+ 'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+ source_files = []
+ script_path = i['run_script_input']['path']
+
+ env['CM_SOURCE_FOLDER_PATH'] = os.path.join(
+ script_path, env['CM_TMP_SRC_FOLDER'])
+
+ for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+ if file.endswith(".c") or file.endswith(".cpp"):
+ source_files.append(file)
+
+ env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+
+ if '+CPLUS_INCLUDE_PATH' not in env:
+ env['+CPLUS_INCLUDE_PATH'] = []
+ if '+C_INCLUDE_PATH' not in env:
+ env['+C_INCLUDE_PATH'] = []
+
+ env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+ env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+
+ # TODO: get cuda path ugly fix
+ if env['CM_MLPERF_DEVICE'] == 'gpu':
+ env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+ env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+ if '+LD_LIBRARY_PATH' not in env:
+ env['+LD_LIBRARY_PATH'] = []
+ if '+DYLD_FALLBACK_LIBRARY_PATH' not in env:
+ env['+DYLD_FALLBACK_LIBRARY_PATH'] = []
+ env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
+ # library search path should point at the CUDA lib dir, not the include dir
+ env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB'])
+
+ if '+ CXXFLAGS' not in env:
+ env['+ CXXFLAGS'] = []
+ env['+ CXXFLAGS'].append("-std=c++17")
+
+ # add preprocessor flag like "#define CM_MODEL_RESNET50"
+ env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
+ # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
+ env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' +
+ env['CM_MLPERF_BACKEND'].upper())
+ # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
+ env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' +
+ env['CM_MLPERF_DEVICE'].upper())
+
+ if '+ LDCXXFLAGS' not in env:
+ env['+ LDCXXFLAGS'] = []
+
+ env['+ LDCXXFLAGS'] += [
+ "-lmlperf_loadgen",
+ "-lpthread"
+ ]
+ # e.g. -lonnxruntime
+ if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env:
+ env['+ LDCXXFLAGS'].append('-l' +
+ env['CM_MLPERF_BACKEND_LIB_NAMESPEC'])
+ # e.g. -lcudart
+ if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env:
+ env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC'])
+
+ if env.get('CM_TMP_LINK_LIBS', ''):
+ libs = env['CM_TMP_LINK_LIBS'].split(",")
+ for lib in libs:
+ env['+ LDCXXFLAGS'].append('-l' + lib)
+
+ env['CM_LINKER_LANG'] = 'CXX'
+ env['CM_RUN_DIR'] = os.getcwd()
+
+ if 'CM_MLPERF_CONF' not in env:
+ env['CM_MLPERF_CONF'] = os.path.join(
+ env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+ if 'CM_MLPERF_USER_CONF' not in env:
+ env['CM_MLPERF_USER_CONF'] = os.path.join(
+ env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")
+
+ if env.get('CM_DATASET_COMPRESSED', "no").lower() in [
+ "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''):
+ # Use all cores for input preprocessing
+ env['CM_HOST_USE_ALL_CORES'] = "yes"
+ env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing"
+
+ return {'return': 0}
+
+
+def postprocess(i):
+
+ env = i['env']
+ state = i['state']
+
+ return {'return': 0}
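customize.py above only assembles compiler and linker flags; in particular, each run injects preprocessor defines such as -DCM_MODEL_RESNET50 and -DCM_MLPERF_BACKEND_TFLITE into '+ CXXFLAGS'. Purely as an illustration of how sources can consume those defines (the files under src/ are not shown in this diff, and these identifiers are hypothetical):

// Illustrative only: branching on the defines customize.py adds to "+ CXXFLAGS".
#if defined(CM_MODEL_RESNET50)
static const char* kModelName = "resnet50";   // hypothetical constant
#elif defined(CM_MODEL_MOBILENET)
static const char* kModelName = "mobilenet";
#else
static const char* kModelName = "unknown";
#endif

#ifdef CM_MLPERF_BACKEND_TFLITE
// TFLite-specific code path would be compiled here
#endif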
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
new file mode 100644
index 0000000000..76f1209a80
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+#pragma once
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <chrono>
+#include <dirent.h>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <string.h>
+#include <vector>