diff --git a/.editorconfig b/.editorconfig index ad23db07fc8..5ac3b0f8e5b 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,6 +16,7 @@ indent_style = space indent_size = 4 [*.md] +max_line_length = 0 trim_trailing_whitespace = false [*.rst] diff --git a/.github/workflows/add-to-project.yml b/.github/workflows/add-to-project.yml index 567f317bcb2..21d1319f4e1 100644 --- a/.github/workflows/add-to-project.yml +++ b/.github/workflows/add-to-project.yml @@ -10,13 +10,13 @@ jobs: name: Add issue to project runs-on: ubuntu-latest steps: - - uses: actions/add-to-project@v1.0.1 + - uses: actions/add-to-project@v1.0.2 with: # You can target a repository in a different organization # to the issue project-url: https://github.com/orgs/lablup/projects/11 github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} - - uses: actions/add-to-project@v1.0.1 + - uses: actions/add-to-project@v1.0.2 with: project-url: https://github.com/orgs/lablup/projects/20 github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} diff --git a/.github/workflows/pr-number-assign.yml b/.github/workflows/assign-pr-number.yml similarity index 97% rename from .github/workflows/pr-number-assign.yml rename to .github/workflows/assign-pr-number.yml index dec40a83d43..c213c147f8e 100644 --- a/.github/workflows/pr-number-assign.yml +++ b/.github/workflows/assign-pr-number.yml @@ -15,6 +15,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 2 + token: ${{ secrets.WORKFLOW_PAT }} - name: Extract Python version from pants.toml run: | diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 2d457c77fe8..052b5b9f43e 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -84,6 +84,8 @@ jobs: needs: backport-target-branch strategy: matrix: ${{ fromJson(needs.backport-target-branch.outputs.matrix) }} + permissions: + pull-requests: write steps: - uses: actions/checkout@v4 with: @@ -92,7 +94,7 @@ jobs: run: | git config --global user.name "${{ needs.backport-target-branch.outputs.author }}" git config --global user.email "${{ needs.backport-target-branch.outputs.author_email }}" - git fetch origin main --depth=2 + git fetch origin main --depth=10 git cherry-pick --strategy=recursive --strategy-option=theirs ${{ needs.backport-target-branch.outputs.latest_commit }} git commit \ --amend -m '${{ needs.backport-target-branch.outputs.commit_message }}' \ diff --git a/.github/workflows/build-static-wsproxy.yml b/.github/workflows/build-static-wsproxy.yml deleted file mode 100644 index f9fed22d65e..00000000000 --- a/.github/workflows/build-static-wsproxy.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Build static wsproxy - -on: - workflow_dispatch: - inputs: - branch: - description: 'The WebUI branch to use' - required: true - default: 'main' - type: string - -jobs: - build-static-wsproxy: - strategy: - fail-fast: false - matrix: - # ubuntu-latest: intel - # linux-aarch64: aarch64 (self-hosted) - # macos-12: intel - # macos-13: apple silicon - os: [ubuntu-latest, linux-aarch64, macos-13-xlarge, macos-12-large] - runs-on: ${{ matrix.os }} - steps: - - name: Clone the webui repo - uses: actions/checkout@v4 - with: - repository: lablup/backend.ai-webui - ref: ${{ inputs.branch }} - - if: ${{ !endsWith(matrix.os, 'linux-aarch64') }} - uses: actions/setup-node@v4 - with: - node-version: '20.8.0' - # For linux-aarch64 runner, we assume that we have the correct NodeJS version already (using asdf). 
- - name: Build wsproxy - run: | - npm i - OS_TYPE=$(uname -s) - CPU_ARCH=$(uname -m) - SYSTEM="$OS_TYPE $CPU_ARCH" - case "$SYSTEM" in - "Linux x86_64" ) - PLATFORM="linux_x64" - ;; - "Linux aarch64" ) - PLATFORM="linux_arm64" - ;; - "Darwin x86_64" ) - PLATFORM="mac_x64" - npm rebuild - ;; - "Darwin arm64" ) - PLATFORM="mac_arm64" - npm rebuild - ;; - esac - make -j1 "$PLATFORM" - - name: Show the build result - run: | - ls -lh app/ - - name: Upload artifacts - uses: actions/upload-artifact@v3 - with: - name: static-wsproxy - path: app/backend.ai-local-proxy-* diff --git a/.github/workflows/default.yml b/.github/workflows/default.yml index 70412bf348b..76e7bc80aac 100644 --- a/.github/workflows/default.yml +++ b/.github/workflows/default.yml @@ -22,7 +22,8 @@ jobs: else echo "GIT_FETCH_DEPTH=2" >> "${GITHUB_ENV}" fi - - uses: actions/checkout@v4 + - name: Check out the revision with minimal required history + uses: actions/checkout@v4 with: fetch-depth: ${{ env.GIT_FETCH_DEPTH }} - name: Extract Python version from pants.toml @@ -79,7 +80,7 @@ jobs: typecheck: - if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:ci') && github.event.pull_request.merged == false }} + if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:ci') && !contains(fromJSON('["flow:merge-queue", "flow:hotfix"]'), github.event.label.name) && github.event.pull_request.merged == false }} runs-on: ubuntu-latest steps: - name: Calculate the fetch depth @@ -89,7 +90,8 @@ jobs: else echo "GIT_FETCH_DEPTH=2" >> "${GITHUB_ENV}" fi - - uses: actions/checkout@v4 + - name: Check out the revision with minimal required history + uses: actions/checkout@v4 with: fetch-depth: ${{ env.GIT_FETCH_DEPTH }} - name: Extract Python version from pants.toml @@ -142,7 +144,7 @@ jobs: test: - if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:ci') && github.event.pull_request.merged == false }} + if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:ci') && !contains(fromJSON('["flow:merge-queue", "flow:hotfix"]'), github.event.label.name) && github.event.pull_request.merged == false }} runs-on: [ubuntu-latest-8-cores] steps: - name: Calculate the fetch depth @@ -152,7 +154,8 @@ jobs: else echo "GIT_FETCH_DEPTH=2" >> "${GITHUB_ENV}" fi - - uses: actions/checkout@v4 + - name: Check out the revision with minimal required history + uses: actions/checkout@v4 with: fetch-depth: ${{ env.GIT_FETCH_DEPTH }} - name: Create LFS file hash list @@ -220,14 +223,15 @@ jobs: strategy: fail-fast: false matrix: - # ubuntu-latest: intel - # linux-aarch64: aarch64 (self-hosted) + # ubuntu-latest: x86-64 + # ubuntu-22.04-arm64: aarch64 # macos-12: intel # macos-13: apple silicon - os: [ubuntu-latest, linux-aarch64, macos-13-xlarge, macos-12-large] + os: [ubuntu-latest, ubuntu-22.04-arm64, macos-13-xlarge, macos-12-large] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - name: Check out the revision + uses: actions/checkout@v4 - name: Fetch remote tags run: git fetch origin 'refs/tags/*:refs/tags/*' -f - name: Create LFS file hash list @@ -245,55 +249,14 @@ jobs: PYTHON_VERSION=$(awk -F'["]' '/CPython==/ {print $2; exit}' pants.toml | sed 's/CPython==//') echo "PANTS_CONFIG_FILES=pants.ci.toml" >> $GITHUB_ENV echo "PROJECT_PYTHON_VERSION=$PYTHON_VERSION" >> $GITHUB_ENV - - name: Install coreutils for macOS if: ${{ startsWith(matrix.os, 'macos') }} run: brew install coreutils - - if: ${{ !endsWith(matrix.os, 'linux-aarch64') }} + - name: Set up Python as Runtime uses: actions/setup-python@v5 with: 
python-version: ${{ env.PROJECT_PYTHON_VERSION }} cache: "pip" - # For linux-aarch64 runner, we assume that we have the correct prebuilt Python version already. - - - name: Unpack local-proxy binaries - run: | - OS_TYPE=$(uname -s) - CPU_ARCH=$(uname -m) - SYSTEM="$OS_TYPE $CPU_ARCH" - case "$SYSTEM" in - "Linux x86_64" ) - SRC_PLATFORM="linux-x64" - DST_PLATFORM="linux-x86_64" - CHECKSUM_CMD="sha256sum" - ;; - "Linux aarch64" ) - SRC_PLATFORM="linux-arm64" - DST_PLATFORM="linux-aarch64" - CHECKSUM_CMD="sha256sum" - ;; - "Darwin x86_64" ) - SRC_PLATFORM="macos-x64" - DST_PLATFORM="macos-x86_64" - CHECKSUM_CMD="shasum -a 256" - ;; - "Darwin arm64" ) - SRC_PLATFORM="macos-arm64" - DST_PLATFORM="macos-aarch64" - CHECKSUM_CMD="shasum -a 256" - ;; - esac - mkdir dist-local-proxy - # Normalize the package naming - unzip "src/ai/backend/web/assets/backend.ai-local-proxy-$SRC_PLATFORM.zip" - mv "backend.ai-local-proxy" "dist-local-proxy/backendai-local-proxy-$DST_PLATFORM" - cd dist-local-proxy - ls | xargs -I{} sh -c "$CHECKSUM_CMD {} > {}.sha256" - - name: Upload local-proxy binaries - uses: actions/upload-artifact@v4 - with: - name: local-proxy-${{ matrix.os }} - path: dist-local-proxy/* - name: Bootstrap Pants uses: pantsbuild/actions/init-pants@v8 with: @@ -326,7 +289,8 @@ jobs: if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - name: Check out the revision + uses: actions/checkout@v4 - name: Fetch remote tags run: git fetch origin 'refs/tags/*:refs/tags/*' -f - name: Create LFS file hash list @@ -395,6 +359,8 @@ jobs: if: always() # We want the log even on failures. build-sbom: + needs: [lint, typecheck, test] + if: ${{ github.event_name == 'push' && contains(github.ref, 'refs/tags/') }} uses: ./.github/workflows/sbom.yml make-final-release: @@ -422,7 +388,7 @@ jobs: pip install -U -r tools/towncrier-requirements.txt - name: Install local dependencies for packaging run: | - pip install -U 'twine~=4.0' 'packaging>=21.3' + pip install -U 'twine~=5.0' 'packaging>=21.3' - name: Extract the release changelog run: | python ./scripts/extract-release-changelog.py @@ -438,12 +404,6 @@ jobs: pattern: scies-* path: dist merge-multiple: true - - name: Download local-proxy - uses: actions/download-artifact@v4 - with: - pattern: local-proxy-* - path: dist - merge-multiple: true - name: Download SBOM report uses: actions/download-artifact@v4 with: @@ -553,7 +513,8 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v4 + - name: Check out the revision + uses: actions/checkout@v4 - name: Create LFS file hash list run: git lfs ls-files -l | cut -d ' ' -f1 | sort > .lfs-assets-id - name: Restore LFS cache diff --git a/.github/workflows/pr-prefix.yml b/.github/workflows/pr-prefix.yml index e7ee83a9cd3..7c226be5a6a 100644 --- a/.github/workflows/pr-prefix.yml +++ b/.github/workflows/pr-prefix.yml @@ -9,19 +9,24 @@ jobs: runs-on: ubuntu-latest steps: - name: Check PR title prefix + env: + TITLE: ${{ github.event.pull_request.title }} run: | - if echo "${{ github.event.pull_request.title }}" | tr ':' '\n' | head -n 1 | grep -qE 'feat|fix|docs|refactor|ci|chore(\([^)]+\)|$)'; + if echo "$TITLE" | tr ':' '\n' | head -n 1 | grep -qE '^(feat|fix|docs?|refactor|ci|chore(\([^)]+\))?|deps)$'; then echo "PR title is valid." else echo "PR title is invalid." 
echo "Use the title prefixes like:" - echo " feat: (for new features and functionality)" - echo " fix: (for bug fixes and revisions on how things work)" - echo " docs: (for docs, docstring, and comment changes)" - echo " refactor: (for refactoring and revisions on how things are related)" - echo " ci: (for changes related to CI/CD workflows)" - echo " chore(...): (for changes related to repo/build configs, dependencies, etc.)" + echo " feat: (for new features and functionality)" + echo " fix: (for bug fixes and revisions on how things work)" + echo " doc: (for docs, docstring, and comment changes)" + echo " docs: (for docs, docstring, and comment changes)" + echo " refactor: (for refactoring and revisions on how things are related)" + echo " ci: (for changes related to CI/CD workflows)" + echo " chore: (for changes related to repo/build configs, tool dependencies, etc.)" + echo " chore(...): (for changes related to repo/build configs, tool dependencies, etc.)" + echo " deps: (for changes related to upstream dependencies, etc.)" echo "following the conventional commit style." exit 1 fi diff --git a/.github/workflows/sbom.yml b/.github/workflows/sbom.yml index b0996a615fc..3fa9386f322 100644 --- a/.github/workflows/sbom.yml +++ b/.github/workflows/sbom.yml @@ -4,31 +4,23 @@ on: [workflow_dispatch, workflow_call] jobs: sbom: - if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:ci') }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 2 - - name: Extract Python version from pants.toml + - name: Calculate the fetch depth run: | - PYTHON_VERSION=$(grep -m 1 -oP '(?<=CPython==)([^"]+)' pants.toml) - echo "PROJECT_PYTHON_VERSION=$PYTHON_VERSION" >> $GITHUB_ENV - - name: Set up Python as Runtime - uses: actions/setup-python@v5 - with: - python-version: ${{ env.PROJECT_PYTHON_VERSION }} - cache: pip - - name: Prepare cache dir for Pants - run: mkdir -p .tmp - - name: Bootstrap Pants - uses: pantsbuild/actions/init-pants@v8 + if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then + echo "GIT_FETCH_DEPTH=$(( ${{ github.event.pull_request.commits }} + 1 ))" >> "${GITHUB_ENV}" + else + echo "GIT_FETCH_DEPTH=2" >> "${GITHUB_ENV}" + fi + - name: Checkout the source tree + uses: actions/checkout@v4 with: - gha-cache-key: pants-cache-main-1-sbom-py${{ env.PROJECT_PYTHON_VERSION }}-${{ runner.os }}-${{ runner.arch }} - named-caches-hash: ${{ hashFiles('python*.lock', 'tools/*.lock') }} - cache-lmdb-store: 'true' - - uses: CycloneDX/gh-python-generate-sbom@v2 - - name: Upload SBOM report + lfs: false + fetch-depth: ${{ env.GIT_FETCH_DEPTH }} + - name: Generate the SBOM report + uses: CycloneDX/gh-python-generate-sbom@v2 + - name: Upload the SBOM report uses: actions/upload-artifact@v4 with: name: SBOM report diff --git a/.github/workflows/timeline-check.yml b/.github/workflows/timeline-check.yml index 46cbfd99dcc..71fa61c5d7b 100644 --- a/.github/workflows/timeline-check.yml +++ b/.github/workflows/timeline-check.yml @@ -8,14 +8,14 @@ permissions: contents: write jobs: - pr-number-assign: + assign-pr-number: if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip:changelog') && !contains(fromJSON('["flow:merge-queue", "flow:hotfix"]'), github.event.label.name) && github.event.pull_request.number != null && github.event.pull_request.merged == false }} - uses: ./.github/workflows/pr-number-assign.yml + uses: ./.github/workflows/assign-pr-number.yml secrets: - WORKFLOW_PAT: ${{ secrets.WORKFLOW_PAT }} + WORKFLOW_PAT: ${{ secrets.OCTODOG }} towncrier: - needs: 
[pr-number-assign] + needs: [assign-pr-number] runs-on: ubuntu-latest steps: - name: Get PR's fetch depth diff --git a/.github/workflows/update-api-schema.yml b/.github/workflows/update-api-schema.yml index 074b08b0eca..a82a6c4e3ba 100644 --- a/.github/workflows/update-api-schema.yml +++ b/.github/workflows/update-api-schema.yml @@ -22,6 +22,7 @@ jobs: with: fetch-depth: ${{ env.GIT_FETCH_DEPTH }} ref: ${{ github.head_ref }} + token: ${{ secrets.OCTODOG }} - name: Extract Python version from pants.toml run: | PYTHON_VERSION=$(grep -m 1 -oP '(?<=CPython==)([^"]+)' pants.toml) @@ -71,13 +72,13 @@ jobs: name: Check Schema runs-on: ubuntu-latest permissions: - contents: read + contents: read pull-requests: write checks: write steps: - uses: actions/checkout@v4 - uses: kamilkisiela/graphql-inspector@release-1717403590269 with: - schema: 'main:src/ai/backend/manager/api/schema.graphql' + schema: '${{ github.base_ref }}:src/ai/backend/manager/api/schema.graphql' rules: | gql-inspector-checker.js diff --git a/.gitignore b/.gitignore index ac740cd8250..38ab7d96c0b 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,7 @@ ENV/ /dev.etcd.installed.json /env-*.sh /agent.dummy.toml +/wsproxy.toml # Local temp and installer-generated directories /.tmp/ diff --git a/BUILD b/BUILD index b241da537ed..63c25e8f4c8 100644 --- a/BUILD +++ b/BUILD @@ -9,6 +9,7 @@ python_requirements( "pycryptodome": ["Crypto"], "python-dateutil": ["dateutil", "dateutil.parser", "dateutil.tz"], "python-json-logger": ["pythonjsonlogger"], + "pyhumps": ["humps"], "pyzmq": ["zmq"], "PyYAML": ["yaml"], "typing-extensions": ["typing_extensions"], diff --git a/changes/2050.feature.md b/changes/2050.feature.md new file mode 100644 index 00000000000..2fa85de911a --- /dev/null +++ b/changes/2050.feature.md @@ -0,0 +1 @@ +Introduce the `rolling_count` GraphQL field to provide the current rate limit counter for a keypair within the designated time window slice diff --git a/changes/2070.feature.md b/changes/2070.feature.md new file mode 100644 index 00000000000..a084f4c3949 --- /dev/null +++ b/changes/2070.feature.md @@ -0,0 +1 @@ +Add the `backend.ai plugin scan` command to inspect the plugin scan results from various entrypoint sources diff --git a/changes/2127.fix.md b/changes/2127.fix.md new file mode 100644 index 00000000000..f937ffcfea4 --- /dev/null +++ b/changes/2127.fix.md @@ -0,0 +1 @@ +Add missing `commit_session_to_file` to `OP_EXC` \ No newline at end of file diff --git a/changes/2134.fix.md b/changes/2134.fix.md new file mode 100644 index 00000000000..44248bdbf03 --- /dev/null +++ b/changes/2134.fix.md @@ -0,0 +1 @@ +Pass ImageRef.canonical in `commit_session_to_file` \ No newline at end of file diff --git a/changes/2145.fix.md b/changes/2145.fix.md new file mode 100644 index 00000000000..5cece254327 --- /dev/null +++ b/changes/2145.fix.md @@ -0,0 +1 @@ +Skip any possible redundant quota update requests when creating new quota diff --git a/changes/2160.fix.md b/changes/2160.fix.md new file mode 100644 index 00000000000..c98659f1d62 --- /dev/null +++ b/changes/2160.fix.md @@ -0,0 +1 @@ +Fix user creation error when any model-store does not exists. diff --git a/changes/2161.fix.md b/changes/2161.fix.md new file mode 100644 index 00000000000..151ced7a8d4 --- /dev/null +++ b/changes/2161.fix.md @@ -0,0 +1 @@ +Fix buggy resolver of `model_card` GQL Query. 
\ No newline at end of file diff --git a/changes/2178.fix.md b/changes/2178.fix.md new file mode 100644 index 00000000000..cc3e5bd4629 --- /dev/null +++ b/changes/2178.fix.md @@ -0,0 +1 @@ +Keep `sync_container_lifecycles()` bgtask alive in a loop. diff --git a/changes/2190.fix.md b/changes/2190.fix.md new file mode 100644 index 00000000000..1d31d3170ef --- /dev/null +++ b/changes/2190.fix.md @@ -0,0 +1 @@ +Fix missing check for group (project) vfolder count limit and error handling with an invalid `group` parameter diff --git a/changes/2205.fix.md b/changes/2205.fix.md new file mode 100644 index 00000000000..205f6a2a17b --- /dev/null +++ b/changes/2205.fix.md @@ -0,0 +1 @@ +Ensure that utilization idleness is checked after a set period. diff --git a/changes/2245.fix.md b/changes/2245.fix.md new file mode 100644 index 00000000000..0be8960db3e --- /dev/null +++ b/changes/2245.fix.md @@ -0,0 +1 @@ +Fix `ZeroDivisionError` in volume usage calculation by returning 0% when volume capacity is zero diff --git a/changes/2250.fix.md b/changes/2250.fix.md new file mode 100644 index 00000000000..8e8340ff5c7 --- /dev/null +++ b/changes/2250.fix.md @@ -0,0 +1 @@ +Fix GraphQL to support query to non-installed images diff --git a/changes/2253.fix.md b/changes/2253.fix.md new file mode 100644 index 00000000000..8e85afc1017 --- /dev/null +++ b/changes/2253.fix.md @@ -0,0 +1 @@ +Add missing `push_image` method implementation to Dummy Agent diff --git a/changes/2254.feature.md b/changes/2254.feature.md new file mode 100644 index 00000000000..86fa74e28c0 --- /dev/null +++ b/changes/2254.feature.md @@ -0,0 +1 @@ +Add `scaling_group.agent_count_by_status` and `scaling_group.agent_total_resource_slots_by_status` GQL fields to query the count and the resource allocation of agents that belong to a scaling group. diff --git a/changes/2275.feature.md b/changes/2275.feature.md new file mode 100644 index 00000000000..2c17a3465cd --- /dev/null +++ b/changes/2275.feature.md @@ -0,0 +1 @@ +Allow superadmins to force-update session status through destroy API. diff --git a/changes/2314.fix.md b/changes/2314.fix.md new file mode 100644 index 00000000000..be7ecb2b72c --- /dev/null +++ b/changes/2314.fix.md @@ -0,0 +1 @@ +Corrected an issue where the `resource_policy` field in the user model was incorrectly mapped to `domain_name`. diff --git a/changes/2316.misc.md b/changes/2316.misc.md new file mode 100644 index 00000000000..8719a650b92 --- /dev/null +++ b/changes/2316.misc.md @@ -0,0 +1 @@ +Handle container creation exception and start exception in separate try-except contexts. diff --git a/changes/2317.fix.md b/changes/2317.fix.md new file mode 100644 index 00000000000..c4e6f72a887 --- /dev/null +++ b/changes/2317.fix.md @@ -0,0 +1 @@ +Omit to clean containerless kernels which are still creating its container. diff --git a/changes/2327.fix.md b/changes/2327.fix.md new file mode 100644 index 00000000000..84b0e426d93 --- /dev/null +++ b/changes/2327.fix.md @@ -0,0 +1 @@ +Run batch execution after the batch session starts. diff --git a/changes/2338.fix.md b/changes/2338.fix.md new file mode 100644 index 00000000000..bda3e39fd72 --- /dev/null +++ b/changes/2338.fix.md @@ -0,0 +1 @@ +Add support for configuring `sync_container_lifecycles()` task. 
diff --git a/changes/2339.deps.md b/changes/2339.deps.md new file mode 100644 index 00000000000..e2e531659ea --- /dev/null +++ b/changes/2339.deps.md @@ -0,0 +1 @@ +Upgrade aiodocker to v0.22.0 with minor bug fixes found by improved type annotations diff --git a/changes/2358.misc.md b/changes/2358.misc.md new file mode 100644 index 00000000000..270d5bdda95 --- /dev/null +++ b/changes/2358.misc.md @@ -0,0 +1 @@ +Fix broken the workflow call for the action that auto-assigns PR numbers to news fragments diff --git a/changes/2364.feature.md b/changes/2364.feature.md new file mode 100644 index 00000000000..b4316110bf5 --- /dev/null +++ b/changes/2364.feature.md @@ -0,0 +1 @@ +Add support for fetching container logs of a specific kernel. diff --git a/changes/2371.fix.md b/changes/2371.fix.md new file mode 100644 index 00000000000..03b5d051f5b --- /dev/null +++ b/changes/2371.fix.md @@ -0,0 +1 @@ +Fix mismatches between responses of `/services/_runtimes` and new model service creation input diff --git a/changes/2372.feature.md b/changes/2372.feature.md new file mode 100644 index 00000000000..5567517eb75 --- /dev/null +++ b/changes/2372.feature.md @@ -0,0 +1 @@ +Introduce Python native WSProxy diff --git a/changes/2379.misc.md b/changes/2379.misc.md new file mode 100644 index 00000000000..c82d3cf40be --- /dev/null +++ b/changes/2379.misc.md @@ -0,0 +1 @@ +Finally stabilize the hanging tests in our CI due to docker-internal races on TCP port mappings to concurrently spawned fixture containers by introducing monotonically increasing TCP port numbers diff --git a/changes/2389.fix.md b/changes/2389.fix.md new file mode 100644 index 00000000000..9a37bba9369 --- /dev/null +++ b/changes/2389.fix.md @@ -0,0 +1 @@ +Fix incorrect check of values returned from docker stat API. diff --git a/changes/2392.fix.md b/changes/2392.fix.md new file mode 100644 index 00000000000..5b6bdc644a6 --- /dev/null +++ b/changes/2392.fix.md @@ -0,0 +1 @@ +Shutdown agent properly by removing a code that waits a cancelled task. diff --git a/changes/2396.misc.md b/changes/2396.misc.md new file mode 100644 index 00000000000..7dc6746633e --- /dev/null +++ b/changes/2396.misc.md @@ -0,0 +1 @@ +Further improve the monotonic port allocation logic for the test containers to remove maximum concurrency restrictions diff --git a/changes/2401.fix.md b/changes/2401.fix.md new file mode 100644 index 00000000000..00d68f7397e --- /dev/null +++ b/changes/2401.fix.md @@ -0,0 +1 @@ +Restrict GraphQL query to `user_nodes` field to require `superadmin` privilege diff --git a/changes/2402.deps.md b/changes/2402.deps.md new file mode 100644 index 00000000000..caf184bbf5b --- /dev/null +++ b/changes/2402.deps.md @@ -0,0 +1 @@ +Upgrade aiodocker to 0.22.1 to fix error handling when trying to extract the log of non-existing containers diff --git a/changes/2404.enhance.md b/changes/2404.enhance.md new file mode 100644 index 00000000000..10c83081a63 --- /dev/null +++ b/changes/2404.enhance.md @@ -0,0 +1 @@ +Remove database-level foreign key constraints in `vfolders.{user,group}` columns to decouple the timing of vfolder deletion and user/group deletion. diff --git a/changes/2409.feature.md b/changes/2409.feature.md new file mode 100644 index 00000000000..de19dd28045 --- /dev/null +++ b/changes/2409.feature.md @@ -0,0 +1 @@ +Add `row_id`, `type` and `container_registry` fields to the `GroupNode` GQL schema. 
diff --git a/changes/2415.fix.md b/changes/2415.fix.md new file mode 100644 index 00000000000..56a5052fad1 --- /dev/null +++ b/changes/2415.fix.md @@ -0,0 +1 @@ +Utilize `ExtendedJSONEncoder` for error logging to handle `UUID` objects in `extra_data` diff --git a/changes/2419.feature.md b/changes/2419.feature.md new file mode 100644 index 00000000000..71a25d7f050 --- /dev/null +++ b/changes/2419.feature.md @@ -0,0 +1 @@ +Add support for PureStorage RapidFiles Toolkit v2 diff --git a/changes/2421.fix.md b/changes/2421.fix.md new file mode 100644 index 00000000000..4805279c921 --- /dev/null +++ b/changes/2421.fix.md @@ -0,0 +1 @@ +Change outdated references in event module from `kernels` to `sessions`. diff --git a/changes/2424.fix.md b/changes/2424.fix.md new file mode 100644 index 00000000000..086a833900d --- /dev/null +++ b/changes/2424.fix.md @@ -0,0 +1 @@ +Upgrade `inquirer` to remove dependency on deprecated `distutils`, which breaks up execution of the scie builds diff --git a/changes/2429.fix.md b/changes/2429.fix.md new file mode 100644 index 00000000000..92ca4efedbc --- /dev/null +++ b/changes/2429.fix.md @@ -0,0 +1 @@ +Allow specific status of vfolders to query to purge. diff --git a/changes/2436.fix.md b/changes/2436.fix.md new file mode 100644 index 00000000000..7e148164256 --- /dev/null +++ b/changes/2436.fix.md @@ -0,0 +1 @@ +Update the install-dev scripts to use `pnpm` instead of `npm` to speed up installation and resolve some peculiar version resolution issues related to esbuild. diff --git a/changes/2449.deps.md b/changes/2449.deps.md new file mode 100644 index 00000000000..f567102625d --- /dev/null +++ b/changes/2449.deps.md @@ -0,0 +1 @@ +Upgrade the base CPython from 3.12.2 to 3.12.4 diff --git a/changes/2454.fix.md b/changes/2454.fix.md new file mode 100644 index 00000000000..29946e62335 --- /dev/null +++ b/changes/2454.fix.md @@ -0,0 +1 @@ +Fix a packaging issue in the `backendai-webserver` scie executable due to missing explicit requirement of setuptools diff --git a/changes/2460.fix.md b/changes/2460.fix.md new file mode 100644 index 00000000000..00727180113 --- /dev/null +++ b/changes/2460.fix.md @@ -0,0 +1 @@ +Improve pruning of non-physical filesystems when measuring disk usage in agents diff --git a/configs/agent/sample-dummy-config.toml b/configs/agent/sample-dummy-config.toml index a89fbd92a08..92fad6bc398 100644 --- a/configs/agent/sample-dummy-config.toml +++ b/configs/agent/sample-dummy-config.toml @@ -1,6 +1,7 @@ [agent.delay] scan-image = 0.1 pull-image = 0.1 +push-image = 0.1 destroy-kernel = 0.1 clean-kernel = 0.1 create-network = 0.1 diff --git a/configs/wsproxy/halfstack.toml b/configs/wsproxy/halfstack.toml new file mode 100644 index 00000000000..06d837a27a9 --- /dev/null +++ b/configs/wsproxy/halfstack.toml @@ -0,0 +1,12 @@ +[wsproxy] +bind_host = "0.0.0.0" +advertised_host = "127.0.0.1" + +bind_api_port = 5050 +advertised_api_port = 5050 + +# replace these values with your passphrase +jwt_encrypt_key = "QRX/ZX2nwKKpuTSD3ZycPA" +permit_hash_key = "gHNAohmntRV0t9zlwTVQeQ" + +api_secret = "v625xZLOgbMHhl0s49VuqQ" diff --git a/configs/wsproxy/sample.toml b/configs/wsproxy/sample.toml new file mode 100644 index 00000000000..51f4ff1dad7 --- /dev/null +++ b/configs/wsproxy/sample.toml @@ -0,0 +1,76 @@ +[wsproxy] +ipc_base_path = "/tmp/backend.ai/ipc" +event_loop = "asyncio" +pid_file = "/run/backend.ai/wsproxy/wsproxy.pid" +id = "i-node01" +user = 501 +group = 501 +bind_host = "0.0.0.0" +advertised_host = "example.com" +bind_api_port = 5050 
+advertised_api_port = 15050 +bind_proxy_port_range = [ + 10200, + 10300, +] +advertised_proxy_port_range = [ + 20200, + 20300, +] +protocol = "http" + +# replace these values with your passphrase +jwt_encrypt_key = "50M3G00DL00KING53CR3T" +permit_hash_key = "50M3G00DL00KING53CR3T" +api_secret = "50M3G00DL00KING53CR3T" + +aiomonitor_termui_port = 48500 +aiomonitor_webui_port = 49500 + +[logging] +level = "INFO" +drivers = [ + "console", +] + +[logging.pkg_ns] +"" = "WARNING" +"ai.backend" = "DEBUG" +tests = "DEBUG" +aiohttp = "INFO" + +[logging.console] +colored = true +format = "verbose" + +[logging.file] +path = "/var/log/backend.ai" +filename = "wsproxy.log" +backup_count = 5 +rotation_size = "10M" +format = "verbose" + +[logging.logstash] +protocol = "tcp" +ssl_enabled = true +ssl_verify = true + +[logging.logstash.endpoint] +host = "127.0.0.1" +port = 8001 + +[logging.graylog] +host = "127.0.0.1" +port = 8000 +level = "INFO" +ssl_verify = true +ca_certs = "/etc/ssl/ca.pem" +keyfile = "/etc/backend.ai/graylog/privkey.pem" +certfile = "/etc/backend.ai/graylog/cert.pem" + +[debug] +enabled = false +asyncio = false +enhanced_aiomonitor_task_info = false +log_events = false + diff --git a/docs/README.md b/docs/README.md index eaa0818aa57..409b0f7e84f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -7,7 +7,7 @@ Developer guide for Backend.AI documentation ### Installing pyenv and pyenv-virtualenv -* Please refer the official docs: +* Please refer to the official docs: - https://github.com/pyenv/pyenv#installation - https://github.com/pyenv/pyenv-virtualenv#installation @@ -92,7 +92,7 @@ Building PDF requires following libraries to be present on your system. ### Installing dependencies on macOS 1. Install MacTeX from [here](https://www.tug.org/mactex/). There are two types of MacTeX distributions; The BasicTeX one is more lightweight and MacTeX contains most of the libraries commonly used. 2. Follow [here](http://wiki.ktug.org/wiki/wiki.php/KtugPrivateRepository) (Korean) to set up KTUG repository. -3. Exceute following command to install missing dependencies. +3. Execute following command to install missing dependencies. ```console sudo tlmgr install latexmk tex-gyre fncychap wrapfig capt-of framed needspace collection-langkorean collection-fontsrecommended tabulary varwidth titlesec ``` @@ -158,7 +158,7 @@ to interact and inspect the Backend.AI Manager's GraphQL API. The manager's *etcd* configuration should say `config/api/allow-openapi-schema-introspection` is true. 2. Run `backend.ai proxy` command of the client SDK. Depending on your setup, adjust `--bind` and `--port` options. Use the client SDK version 21.03.7+ or 20.09.9+ at least to avoid unexpected CORS issues. -3. From your web browser, avigate to `/spec/openapi` under proxy server set up at step 2. +3. From your web browser, navigate to `/spec/openapi` under proxy server set up at step 2. Enjoy auto-completion and schema introspection of Backend.AI admin API! ### Interactive GraphQL browser @@ -170,7 +170,7 @@ to interact and inspect the Backend.AI Manager's GraphQL API. The manager's *etcd* configuration should say `config/api/allow-graphql-schema-introspection` is true. 2. Run `backend.ai proxy` command of the client SDK. Depending on your setup, adjust `--bind` and `--port` options. Use the client SDK version 21.03.7+ or 20.09.9+ at least to avoid unexpected CORS issues. -3. From your web browser, avigate to `/spec/graphiql` under proxy server set up at step 2. +3. 
From your web browser, navigate to `/spec/graphiql` under proxy server set up at step 2. Enjoy auto-completion and schema introspection of Backend.AI admin API! diff --git a/docs/client/cli/sessions.rst b/docs/client/cli/sessions.rst index a72a2edfa14..a0f8f81077c 100644 --- a/docs/client/cli/sessions.rst +++ b/docs/client/cli/sessions.rst @@ -58,21 +58,24 @@ Both commands offer options to specify which fields of sessions should be printe - Included Session Fields * - (no option) - - ``Session ID``, ``Owner``, ``Image``, ``Type``, + - ``Name``, ``Owner Access Key (admin only)``, ``Session ID``, ``Project/Group``, - ``Status``, ``Status Info``, ``Last updated``, and ``Result``. + ``Main Kernel ID``, ``Image``, ``Type``, ``Status``, - * - ``--id-only`` - - ``Session ID``. + ``Status Info``, ``Last Updated``, and ``Result``. + + + * - ``--name-only`` + - ``Name``. * - ``--detail`` - - ``Session ID``, ``Owner``, ``Image``, ``Type``, + - ``Name``, ``Session ID``, ``Project/Group``, - ``Status``, ``Status Info``, ``Last updated``, ``Result``, + ``Main Kernel ID``, ``Image``, ``Type``, ``Status``, - ``Tag``, ``Created At``, ``Occupied Resource``, ``Used Memory (MiB)``, + ``Status Info``, ``Last Updated``, ``Result``, ``Tag``, - ``Max Used Memory (MiB)``, and ``CPU Using (%)``. + ``Created At``, and ``Occupying Slots``. * - ``-f``, ``--format`` - Specified fields by user. @@ -80,13 +83,13 @@ Both commands offer options to specify which fields of sessions should be printe .. note:: Fields for ``-f/--format`` option can be displayed by specifying comma-separated parameters. - Available parameters for this option are: ``id``, ``status``, ``status_info``, ``created_at``, ``last_updated``, ``result``, ``image``, ``type``, ``task_id``, ``tag``, ``occupied_slots``, ``used_memory``, ``max_used_memory``, ``cpu_using``. + Available parameters for this option are: ``id (session_id)``, ``main_kernel_id``, ``tag``, ``name``, ``type``, ``image``, ``registry``, ``cluster_template (reserved for future release)``, ``cluster_mode``, ``cluster_size``, ``domain_name``, ``group_name``, ``group_id``, ``agent_ids``, ``user_email``, ``user_id``, ``access_key``, ``status``, ``status_info``, ``status_changed``, ``created_at``, ``terminated_at``, ``starts_at``, ``scheduled_at``, ``startup_command``, ``result``, ``resource_opts``, ``scaling_group``, ``service_ports``, ``mounts``, ``occupying_slots``, ``dependencies``, ``abusing_reports``, ``idle_checks``. For example: .. code-block:: shell - backend.ai admin session --format id,status,cpu_using + backend.ai admin session list --format id,status,occupying_slots .. _simple-execution: diff --git a/docs/dev/adding-kernels.rst b/docs/dev/adding-kernels.rst index dfdf51ccef8..fd389385eda 100644 --- a/docs/dev/adding-kernels.rst +++ b/docs/dev/adding-kernels.rst @@ -124,7 +124,7 @@ The label may contain multiple port mapping declarations separated by commas, li jupyter:http:8080,tensorboard:http:6006 -The name may be an non-empty arbitrary ASCII alphanumeric string. +The name may be a non-empty arbitrary ASCII alphanumeric string. We use the kebab-case for it. The protocol may be one of ``tcp``, ``http``, and ``pty``, but currently most services use ``http``. @@ -138,7 +138,7 @@ Service Definition DSL Now the image author should define the service launch sequences using a DSL (domain-specific language). The service definitions are written as JSON files in the container's ``/etc/backend.ai/service-defs`` directory. 
-The file names must be same with the name parts of the port mapping declarations. +The file names must be same as the name parts of the port mapping declarations. For example, a sample service definition file for "jupyter" service (hence its filename must be ``/etc/backend.ai/service-defs/jupyter.json``) looks like: @@ -254,7 +254,7 @@ Adding Custom Jail Policy ~~~~~~~~~~~~~~~~~~~~~~~~~ To write a new policy implementation, extend `the jail policy interface `_ in Go. -Ebmed it inside your jail build. +Embed it inside your jail build. Please give a look to existing jail policies as good references. @@ -288,7 +288,7 @@ Custom startup scripts (aka custom entrypoint) ---------------------------------------------- When the image has *preopen* service ports and/or an endpoint port, Backend.AI automatically sets up application proxy tunnels -as if the listening applications are already started. +as if the listening applications have already started. To initialize and start such applications, put a shell script as ``/opt/container/bootstrap.sh`` when building the image. This per-image bootstrap script is executed as *root* by the agent-injected ``entrypoint.sh``. @@ -364,7 +364,7 @@ The key concept is separation of the "outer" daemon and the "inner" target progr The outer daemon should wrap the inner program inside a pseudo-tty. As the outer daemon is completely hidden in terminal interaction by the end-users, the programming language may differ from the inner program. The challenge is that you need to implement piping of ZeroMQ sockets from/to pseudo-tty file descriptors. -It is up to you how you implement the outer daemon, but if you choose Python for it, we recommend to use asyncio or similar event loop libraries such as tornado and Twisted to mulitplex sockets and file descriptors for both input/output directions. +It is up to you how you implement the outer daemon, but if you choose Python for it, we recommend using asyncio or similar event loop libraries such as tornado and Twisted to mulitplex sockets and file descriptors for both input/output directions. When piping the messages, the outer daemon should not apply any specific transformation; it should send and receive all raw data/control byte sequences transparently because the front-end (e.g., terminal.js) is responsible for interpreting them. Currently we use PUB/SUB ZeroMQ socket types but this may change later. diff --git a/docs/dev/daily-workflows.rst b/docs/dev/daily-workflows.rst index 5de0ce25539..25f0910a332 100644 --- a/docs/dev/daily-workflows.rst +++ b/docs/dev/daily-workflows.rst @@ -209,7 +209,7 @@ you should also configure ``PYTHONPATH`` to include the repository root's ``src` For linters and formatters, configure the tool executable paths to indicate ``dist/export/python/virtualenvs/RESOLVE_NAME/PYTHON_VERSION/bin/EXECUTABLE``. For example, ruff's executable path is -``dist/export/python/virtualenvs/ruff/3.12.2/bin/ruff``. +``dist/export/python/virtualenvs/ruff/3.12.4/bin/ruff``. 
Currently we have the following Python tools to configure in this way: @@ -259,7 +259,7 @@ Set the workspace settings for the Python extension for code navigation and auto * - ``python.analysis.autoSearchPaths`` - true * - ``python.analysis.extraPaths`` - - ``["dist/export/python/virtualenvs/python-default/3.12.2/lib/python3.12/site-packages"]`` + - ``["dist/export/python/virtualenvs/python-default/3.12.4/lib/python3.12/site-packages"]`` * - ``python.analysis.importFormat`` - ``"relative"`` * - ``editor.formatOnSave`` @@ -275,11 +275,11 @@ Set the following keys in the workspace settings to configure Python tools: * - Setting ID - Example value * - ``mypy-type-checker.interpreter`` - - ``["dist/export/python/virtualenvs/mypy/3.12.2/bin/python"]`` + - ``["dist/export/python/virtualenvs/mypy/3.12.4/bin/python"]`` * - ``mypy-type-checker.importStrategy`` - ``"fromEnvironment"`` * - ``ruff.interpreter`` - - ``["dist/export/python/virtualenvs/ruff/3.12.2/bin/python"]`` + - ``["dist/export/python/virtualenvs/ruff/3.12.4/bin/python"]`` * - ``ruff.importStrategy`` - ``"fromEnvironment"`` @@ -309,8 +309,8 @@ Then put the followings in ``.vimrc`` (or ``.nvimrc`` for NeoVim) in the build r .. code-block:: vim let s:cwd = getcwd() - let g:ale_python_mypy_executable = s:cwd . '/dist/export/python/virtualenvs/mypy/3.12.2/bin/mypy' - let g:ale_python_ruff_executable = s:cwd . '/dist/export/python/virtualenvs/ruff/3.12.2/bin/ruff' + let g:ale_python_mypy_executable = s:cwd . '/dist/export/python/virtualenvs/mypy/3.12.4/bin/mypy' + let g:ale_python_ruff_executable = s:cwd . '/dist/export/python/virtualenvs/ruff/3.12.4/bin/ruff' let g:ale_linters = { "python": ['ruff', 'mypy'] } let g:ale_fixers = {'python': ['ruff']} let g:ale_fix_on_save = 1 @@ -328,11 +328,11 @@ just like VSCode (see `the official reference `_ +* `get_info() `_ diff --git a/docs/dev/development-setup.rst b/docs/dev/development-setup.rst index 70cc2e4e295..6ce729f0012 100644 --- a/docs/dev/development-setup.rst +++ b/docs/dev/development-setup.rst @@ -100,14 +100,14 @@ is that how you launch the Web UI from the mono-repo. Installation from Source ------------------------ -For the ease of on-boarding developer experience, we provide an automated +For the ease of onboarding developer experience, we provide an automated script that installs all server-side components in editable states with just one command. Prerequisites ~~~~~~~~~~~~~ -Install the followings accordingly to your host operating system. +Install the following according to your host operating system. * `Git LFS `_ @@ -126,7 +126,7 @@ Install the followings accordingly to your host operating system. * `Pants `_ - - For pants version 2.18 and later. The following verions are released from Github Releases instead of PyPI. + - For pants version 2.18 and later. The following versions are released from Github Releases instead of PyPI. .. warning:: diff --git a/docs/dev/version-management-and-upgrades.rst b/docs/dev/version-management-and-upgrades.rst index f2cdc76fa60..be02b368560 100644 --- a/docs/dev/version-management-and-upgrades.rst +++ b/docs/dev/version-management-and-upgrades.rst @@ -34,7 +34,7 @@ Version Numbering * Generally ``backend.ai-manager 1.2.p`` is compatible with ``backend.ai-agent 1.2.q`` (where ``p`` and ``q`` are same or different integers) - * As of 22.09, this won't be guaranteed any more. All server-side core component versions should **exactly match** with others, as we release them at once from the mono-repo, even for those who do not have any code changes. 
+ * As of 22.09, this won't be guaranteed anymore. All server-side core component versions should **exactly match** with others, as we release them at once from the mono-repo, even for those who do not have any code changes. * The client is guaranteed to be backward-compatible with the server they share the same API specification version. diff --git a/docs/index.rst b/docs/index.rst index 523c48ad36e..82b3ad2d4b6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,7 +10,7 @@ From the user's perspective, Backend.AI is a cloud-like GPU powered HPC/DL appli It runs arbitrary user codes safely in resource-constrained containers. It hosts various programming languages and runtimes, such as Python 2/3, R, PHP, C/C++, Java, JavaScript, Julia, Octave, Haskell, Lua and Node.js, as well as AI-oriented libraries such as TensorFlow, Keras, Caffe, and MXNet. -From the admin's perspecetive, Backend.AI streamlines the process of assigning computing nodes, GPUs, and storage space to individual research team members. +From the admin's perspective, Backend.AI streamlines the process of assigning computing nodes, GPUs, and storage space to individual research team members. With detailed policy-based idle checks and resource limits, you no longer have to worry about exceeding the capacity of the cluster when there are high demands. Using the plugin architecture, Backend.AI also offers more advanced features such as fractional sharing of GPUs and site-specific SSO integrations, etc. for various-sized enterprise customers. diff --git a/docs/install/install-from-package/os-preparation.rst b/docs/install/install-from-package/os-preparation.rst index 2e9b257289d..0a813b926fb 100644 --- a/docs/install/install-from-package/os-preparation.rst +++ b/docs/install/install-from-package/os-preparation.rst @@ -133,10 +133,10 @@ For example, .. code-block:: console - $ curl -L "https://github.com/indygreg/python-build-standalone/releases/download/20231002/cpython-3.12.2+20240224-x86_64-unknown-linux-gnu-install_only.tar.gz" > cpython-3.12.2+20240224-x86_64-unknown-linux-gnu-install_only.tar.gz - $ tar -xf "cpython-3.12.2+20240224-x86_64-unknown-linux-gnu-install_only.tar.gz" + $ curl -L "https://github.com/indygreg/python-build-standalone/releases/download/20240713/cpython-3.12.4+20240713-x86_64-unknown-linux-gnu-install_only.tar.gz" > cpython-3.12.4+20240713-x86_64-unknown-linux-gnu-install_only.tar.gz + $ tar -xf "cpython-3.12.4+20240713-x86_64-unknown-linux-gnu-install_only.tar.gz" $ mkdir -p "/home/bai/.static-python/versions" - $ mv python "/home/bai/.static-python/versions/3.12.2" + $ mv python "/home/bai/.static-python/versions/3.12.4" Then, you can create multiple virtual environments per service. 
To create a virtual environment for Backend.AI Manager and activate it, for example, you may run: @@ -145,7 +145,7 @@ virtual environment for Backend.AI Manager and activate it, for example, you may $ mkdir "${HOME}/manager" $ cd "${HOME}/manager" - $ ~/.static-python/versions/3.12.2/bin/python3 -m venv .venv + $ ~/.static-python/versions/3.12.4/bin/python3 -m venv .venv $ source .venv/bin/activate $ pip install -U pip setuptools wheel diff --git a/docs/install/install-from-source.rst b/docs/install/install-from-source.rst index 9dde5d6fe3c..369c8c6efae 100644 --- a/docs/install/install-from-source.rst +++ b/docs/install/install-from-source.rst @@ -43,7 +43,7 @@ you need to manually perform the same repository cloning along with the pyenv, P Since we use the mono-repo for the core packages, there is no way to separately clone the agent sources only. Just clone the entire repository and configure/execute the agent only. - Ensure that you **also pull the LFS files and submodules** when you manually clone it. + Ensure that you **also pull the LFS files** when you manually clone it. Once your ``pants`` is up and working, run ``pants export`` to populate virtualenvs and install dependencies. diff --git a/docs/locales/ko/LC_MESSAGES/client/cli/sessions.po b/docs/locales/ko/LC_MESSAGES/client/cli/sessions.po index 9b2b11894b2..f205f4dcd2f 100644 --- a/docs/locales/ko/LC_MESSAGES/client/cli/sessions.po +++ b/docs/locales/ko/LC_MESSAGES/client/cli/sessions.po @@ -9,30 +9,30 @@ msgid "" msgstr "" "Project-Id-Version: Backend.AI Documentation 22.06\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2022-09-13 15:33+0900\n" +"POT-Creation-Date: 2023-12-08 16:08+0900\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.10.3\n" +"Generated-By: Babel 2.13.1\n" -#: ../../client/cli/sessions.rst:2 8b94c9d2206446c98cdeca60fe9fe6a7 +#: ../../client/cli/sessions.rst:2 0d3b3b3f08144e5baf7c334b98da9a6f msgid "Compute Sessions" msgstr "" -#: ../../client/cli/sessions.rst:6 6354094300d04ea48159e2f14ee27016 +#: ../../client/cli/sessions.rst:6 49f60dda559f405a87741309445a865d msgid "" "Please consult the detailed usage in the help of each command (use ``-h``" " or ``--help`` argument to display the manual)." msgstr "" -#: ../../client/cli/sessions.rst:11 e2c8ae78830147bc84202448934c60bb +#: ../../client/cli/sessions.rst:11 d4a291e1e9dc49d7883178e83c63d07a msgid "Listing sessions" msgstr "" -#: ../../client/cli/sessions.rst:13 2aa70201885e4bf7aae726e195f54f89 +#: ../../client/cli/sessions.rst:13 7c136cc77ea94c56bac6fae559772dd9 msgid "" "List the session owned by you with various status filters. The most " "recently status-changed sessions are listed first. To prevent overloading" @@ -40,136 +40,148 @@ msgid "" "provides a separate ``--all`` option to paginate further sessions." msgstr "" -#: ../../client/cli/sessions.rst:23 a7114abd28ce4bfe93fc2b367e48d709 +#: ../../client/cli/sessions.rst:23 c025dd9e06844d83b5ef350d92946eac msgid "" "The ``ps`` command is an alias of the following ``admin session list`` " "command. If you have the administrator privilege, you can list sessions " "owned by other users by adding ``--access-key`` option here." 
msgstr "" -#: ../../client/cli/sessions.rst:31 53a84222431941eda35c8f917e471825 +#: ../../client/cli/sessions.rst:31 afca9c9403a846d7bd0314159db04085 msgid "" "Both commands offer options to set the status filter as follows. For " "other options, please consult the output of ``--help``." msgstr "" #: ../../client/cli/sessions.rst:38 ../../client/cli/sessions.rst:57 -#: 66fdd9eec297403695d0e8ffb5da2639 f81324af7fcc434e8d975ce560fcc595 +#: 65522864c3ba4df7b1665e044b587673 e658dbaea9194c38934a1a626bb68931 msgid "Option" msgstr "" -#: ../../client/cli/sessions.rst:39 b518d00f8cb7443eadc7a4b368ccc030 +#: ../../client/cli/sessions.rst:39 7d7cff3608e34000af5a715153351358 msgid "Included Session Status" msgstr "" #: ../../client/cli/sessions.rst:41 ../../client/cli/sessions.rst:60 -#: 052e5e56458643d8aab1b574c62e9c20 c8d0b3a5ee4c4d92b2136312dc0d568b +#: 52df0477313641018ee573338c4b1a77 fcbac03b588a455f9c468e0e515f201e msgid "(no option)" msgstr "" -#: ../../client/cli/sessions.rst:42 23b48f189a53428d9002fb9c65ea2b8c +#: ../../client/cli/sessions.rst:42 4f067c02b259464f84aeade4874f054f msgid "" "``PENDING``, ``PREPARING``, ``RUNNING``, ``RESTARTING``, ``TERMINATING``," " ``RESIZING``, ``SUSPENDED``, and ``ERROR``." msgstr "" -#: ../../client/cli/sessions.rst:45 2deaa247a04143f9b25357d4fcf10ee5 +#: ../../client/cli/sessions.rst:45 c9e8de9d1c1d4d82b76690b8b3fa6cc5 msgid "``--running``" msgstr "" -#: ../../client/cli/sessions.rst:46 0c767ea8dc964938ba8937ac71424335 +#: ../../client/cli/sessions.rst:46 e9096ba3896544348e2c88ac765dc5c6 msgid "``PREPARING``, ``PULLING``, and ``RUNNING``." msgstr "" -#: ../../client/cli/sessions.rst:48 f8e31c45e3cd4e79a0dbe41b58e73fb5 +#: ../../client/cli/sessions.rst:48 7675c8771d0d4d20984e45e995ae38de msgid "``--dead``" msgstr "" -#: ../../client/cli/sessions.rst:49 da4888d0d457404692ce3d6a35df81da +#: ../../client/cli/sessions.rst:49 b3c97dfaf97b4cfaa846d1f6a8f129ef msgid "``CANCELLED`` and ``TERMINATED``." msgstr "" -#: ../../client/cli/sessions.rst:51 15522207a1fa434d91ea5722870e932d +#: ../../client/cli/sessions.rst:51 34d2a4d9e1064f1b8553a73b219f140f msgid "" "Both commands offer options to specify which fields of sessions should be" " printed as follows." msgstr "" -#: ../../client/cli/sessions.rst:58 5d741fc8ecf04d4cbad1d8cc9900204c +#: ../../client/cli/sessions.rst:58 1e01d99c903641cda06118798da44f2d msgid "Included Session Fields" msgstr "" -#: ../../client/cli/sessions.rst:61 ../../client/cli/sessions.rst:69 -#: 9e8791f4c5bf4726bc23cd134e66a41c fb853a68197b4affb8e14d517943c4b3 -msgid "``Session ID``, ``Owner``, ``Image``, ``Type``," +#: ../../client/cli/sessions.rst:61 51ecd7ba4b9649a69beb647405ecedf4 +msgid "" +"``Name``, ``Owner Access Key (admin only)``, ``Session ID``, " +"``Project/Group``," +msgstr "" + +#: ../../client/cli/sessions.rst:63 ../../client/cli/sessions.rst:74 +#: dad921c7e2b54b2fa138634a068f4304 +msgid "``Main Kernel ID``, ``Image``, ``Type``, ``Status``," msgstr "" -#: ../../client/cli/sessions.rst:63 5e5b048958324b898cdde1a8d2ac3909 -msgid "``Status``, ``Status Info``, ``Last updated``, and ``Result``." +#: ../../client/cli/sessions.rst:65 b257f3a20b3d4c4ead0749134af9c1d6 +msgid "``Status Info``, ``Last Updated``, and ``Result``." msgstr "" -#: ../../client/cli/sessions.rst:65 2aaceafa8fd24db2b5a1034b5ea51065 -msgid "``--id-only``" +#: ../../client/cli/sessions.rst:68 aa78e7b2f53a4e63bc8520dbc4071827 +msgid "``--name-only``" msgstr "" -#: ../../client/cli/sessions.rst:66 5d18c5723cf04c3db0386830d68ba93d -msgid "``Session ID``." 
+#: ../../client/cli/sessions.rst:69 9727079cfaed45babffeda98fcbb09c2 +msgid "``Name``." msgstr "" -#: ../../client/cli/sessions.rst:68 06627842b7a44859a7ff4da1e914c6e2 +#: ../../client/cli/sessions.rst:71 5d191c27630b4d5c8cb2a8b824ed8572 msgid "``--detail``" msgstr "" -#: ../../client/cli/sessions.rst:71 3db52e5c03e34722afbc2f628043face -msgid "``Status``, ``Status Info``, ``Last updated``, ``Result``," +#: ../../client/cli/sessions.rst:72 204d52a45d004d749ee7de1b242b047b +msgid "``Name``, ``Session ID``, ``Project/Group``," msgstr "" -#: ../../client/cli/sessions.rst:73 7473e6c28a494a14801d61b818428d1a -msgid "``Tag``, ``Created At``, ``Occupied Resource``, ``Used Memory (MiB)``," +#: ../../client/cli/sessions.rst:76 fad1369498c2442a9d271f4d30898da9 +msgid "``Status Info``, ``Last Updated``, ``Result``, ``Tag``," msgstr "" -#: ../../client/cli/sessions.rst:75 11674d926ecb4fada2f2dae2ae37422f -msgid "``Max Used Memory (MiB)``, and ``CPU Using (%)``." +#: ../../client/cli/sessions.rst:78 7480e1bdf5d04d1a8b8924debf4bac7c +msgid "``Created At``, and ``Occupying Slots``." msgstr "" -#: ../../client/cli/sessions.rst:77 18dd0bf53a9c4ce1bbec0941b56bef41 +#: ../../client/cli/sessions.rst:80 b5a57bc94cb7452ba8f10c4ddf4525e6 msgid "``-f``, ``--format``" msgstr "" -#: ../../client/cli/sessions.rst:78 790ae9fc16f94f65b74b60f9c3496e12 +#: ../../client/cli/sessions.rst:81 4fb6ba0f9011472784989ba0a3102104 msgid "Specified fields by user." msgstr "" -#: ../../client/cli/sessions.rst:81 5fe233638b744781b9967916b5137999 +#: ../../client/cli/sessions.rst:84 5f9d9cf84442408b9c4573201d624ecb msgid "" "Fields for ``-f/--format`` option can be displayed by specifying comma-" "separated parameters." msgstr "" -#: ../../client/cli/sessions.rst:83 01cd9760982e45b78a2dad128cb909c5 +#: ../../client/cli/sessions.rst:86 9672c105dff24506ad2a9149bf742538 msgid "" -"Available parameters for this option are: ``id``, ``status``, " -"``status_info``, ``created_at``, ``last_updated``, ``result``, ``image``," -" ``type``, ``task_id``, ``tag``, ``occupied_slots``, ``used_memory``, " -"``max_used_memory``, ``cpu_using``." -msgstr "" - -#: ../../client/cli/sessions.rst:85 8ecaacac4365447fb8dd24f34a6cf2f1 +"Available parameters for this option are: ``id (session_id)``, " +"``main_kernel_id``, ``tag``, ``name``, ``type``, ``image``, ``registry``," +" ``cluster_template (reserved for future release)``, ``cluster_mode``, " +"``cluster_size``, ``domain_name``, ``group_name``, ``group_id``, " +"``agent_ids``, ``user_email``, ``user_id``, ``access_key``, ``status``, " +"``status_info``, ``status_changed``, ``created_at``, ``terminated_at``, " +"``starts_at``, ``scheduled_at``, ``startup_command``, ``result``, " +"``resource_opts``, ``scaling_group``, ``service_ports``, ``mounts``, " +"``occupying_slots``, ``dependencies``, ``abusing_reports``, " +"``idle_checks``." +msgstr "" + +#: ../../client/cli/sessions.rst:88 4a7fdc7e2798496eaa135e2bcb3a4d8d msgid "For example:" msgstr "" -#: ../../client/cli/sessions.rst:94 55048cb7320f4b0e94674ce5681065f9 +#: ../../client/cli/sessions.rst:97 2380cf3a412842bd8f3d62aff2c256ae msgid "Running simple sessions" msgstr "" -#: ../../client/cli/sessions.rst:96 ee2a4128e0d2493fb9382ea83359e73d +#: ../../client/cli/sessions.rst:99 ccc28a81d72e4e51a94dd5d63aec9cef msgid "" "The following command spawns a Python session and executes the code " "passed as ``-c`` argument immediately. ``--rm`` option states that the " "client automatically terminates the session after execution finishes." 
msgstr "" -#: ../../client/cli/sessions.rst:107 79fc18a3a4f9421f9f1aae8a24c2d3e7 +#: ../../client/cli/sessions.rst:110 c83fc90146144906b508cf3bd7644051 msgid "" "By default, you need to specify language with full version tag like " "``python:3.6-ubuntu18.04``. Depending on the Backend.AI admin's language " @@ -177,33 +189,33 @@ msgid "" "know defined language aliases, contact the admin of Backend.AI server." msgstr "" -#: ../../client/cli/sessions.rst:113 288298de245e4189962ca4ab2b2044ca +#: ../../client/cli/sessions.rst:116 8be0c6fd03fc470dbc2fb14d6174f207 msgid "" "The following command spawns a Python session and executes the code " "passed as ``./myscript.py`` file, using the shell command specified in " "the ``--exec`` option." msgstr "" -#: ../../client/cli/sessions.rst:123 c7977eda32564f7d89c590b425beca38 +#: ../../client/cli/sessions.rst:126 4a021c9ab0984eda8dd51b7c93cab8e1 msgid "" "Please note that your ``run`` command may hang up for a very long time " "due to queueing when the cluster resource is not sufficiently available." msgstr "" -#: ../../client/cli/sessions.rst:126 0852a3db1996474c98767891ca6e8bce +#: ../../client/cli/sessions.rst:129 a4d71a7a4c2e461588db57173edd11e3 msgid "" "To avoid indefinite waiting, you may add ``--enqueue-only`` to return " "immediately after posting the session creation request." msgstr "" -#: ../../client/cli/sessions.rst:131 169685c4fc09496d80f394696272a3d1 +#: ../../client/cli/sessions.rst:134 3a6f11c09f1244ff9a10c0968afa158f msgid "" "When using ``--enqueue-only``, the codes are *NOT* executed and relevant " "options are ignored. This makes the ``run`` command to the same of the " "``start`` command." msgstr "" -#: ../../client/cli/sessions.rst:135 1c584c400b4e48829965cb488388085e +#: ../../client/cli/sessions.rst:138 fb67913dd523450aa0e8766f2c379a92 msgid "" "Or, you may use ``--max-wait`` option to limit the maximum waiting time. " "If the session starts within the given ``--max-wait`` seconds, it works " @@ -211,35 +223,35 @@ msgid "" "``--enqueue-only``." msgstr "" -#: ../../client/cli/sessions.rst:140 c10fad6e09264f4cae3443788b77d746 +#: ../../client/cli/sessions.rst:143 1b47da4f14d84360bfc3280ab5c601e2 msgid "" "To watch what is happening behind the scene until the session starts, try" " ``backend.ai events `` to receive the lifecycle events such " "as its scheduling and preparation steps." msgstr "" -#: ../../client/cli/sessions.rst:146 5f52a05c05de467695d05007c70df4df +#: ../../client/cli/sessions.rst:149 114d64c15f1f43409534d0c91ba21795 msgid "Running sessions with accelerators" msgstr "" -#: ../../client/cli/sessions.rst:148 c26fbf52a2ea4b12ade0454b48595909 +#: ../../client/cli/sessions.rst:151 5f6cc6de93234c4d839369c9185f316e msgid "" "Use one or more ``-r`` options to specify resource requirements when " "using ``backend.ai run`` and ``backend.ai start`` commands." msgstr "" -#: ../../client/cli/sessions.rst:151 4b3208770b51410c973fa4e4427dfb3e +#: ../../client/cli/sessions.rst:154 526b569b28424f85b77a91c9e7151927 msgid "" "For instance, the following command spawns a Python TensorFlow session " "using a half of virtual GPU device, 4 CPU cores, and 8 GiB of the main " "memory to execute ``./mygpucode.py`` file inside it." 
msgstr "" -#: ../../client/cli/sessions.rst:163 8906c5399b744d479170cefc044a9325 +#: ../../client/cli/sessions.rst:166 21d0d6c1d14b44bdb228b3611f34308f msgid "Terminating or cancelling sessions" msgstr "" -#: ../../client/cli/sessions.rst:165 34bbde283dfe49b5878d0db1bd7d6827 +#: ../../client/cli/sessions.rst:168 84c719d55671456aa9df08bad8c173ef msgid "" "Without ``--rm`` option, your session remains alive for a configured " "amount of idle timeout (default is 30 minutes). You can see such sessions" @@ -248,7 +260,7 @@ msgid "" " session IDs to terminate them at once." msgstr "" -#: ../../client/cli/sessions.rst:175 8d284f48a5574b239370ceda82c26482 +#: ../../client/cli/sessions.rst:178 b679923c49934d188858c608ff806499 msgid "" "If you terminate ``PENDING`` sessions which are not scheduled yet, they " "are cancelled." @@ -263,3 +275,40 @@ msgstr "" #~ "``--access-key`` option here." #~ msgstr "" +#~ msgid "``Session ID``, ``Owner``, ``Image``, ``Type``," +#~ msgstr "" + +#~ msgid "``Status``, ``Status Info``, ``Last updated``, and ``Result``." +#~ msgstr "" + +#~ msgid "``--id-only``" +#~ msgstr "" + +#~ msgid "``Session ID``." +#~ msgstr "" + +#~ msgid "``Status``, ``Status Info``, ``Last updated``, ``Result``," +#~ msgstr "" + +#~ msgid "``Tag``, ``Created At``, ``Occupied Resource``, ``Used Memory (MiB)``," +#~ msgstr "" + +#~ msgid "``Max Used Memory (MiB)``, and ``CPU Using (%)``." +#~ msgstr "" + +#~ msgid "" +#~ "Available parameters for this option " +#~ "are: ``id``, ``status``, ``status_info``, " +#~ "``created_at``, ``last_updated``, ``result``, " +#~ "``image``, ``type``, ``task_id``, ``tag``, " +#~ "``occupied_slots``, ``used_memory``, ``max_used_memory``," +#~ " ``cpu_using``." +#~ msgstr "" + +#~ msgid "" +#~ "Available parameters for this option " +#~ "are: ``id``, ``status``, ``status_info``, " +#~ "``created_at``, ``status_changed``, ``result``, " +#~ "``image``, ``type``, ``tag``, ``occupying_slots``." +#~ msgstr "" + diff --git a/docs/locales/ko/LC_MESSAGES/install/install-from-source.po b/docs/locales/ko/LC_MESSAGES/install/install-from-source.po index d9b34ec0517..3422d03a390 100644 --- a/docs/locales/ko/LC_MESSAGES/install/install-from-source.po +++ b/docs/locales/ko/LC_MESSAGES/install/install-from-source.po @@ -82,7 +82,7 @@ msgid "" "Since we use the mono-repo for the core packages, there is no way to " "separately clone the agent sources only. Just clone the entire repository" " and configure/execute the agent only. Ensure that you **also pull the " -"LFS files and submodules** when you manually clone it." +"LFS files** when you manually clone it." msgstr "" #: ../../install/install-from-source.rst:48 e9866a26ff964ef49c5d0ad653343e14 diff --git a/docs/release-notes/24.03.md b/docs/release-notes/24.03.md new file mode 100644 index 00000000000..784ee0a5f9a --- /dev/null +++ b/docs/release-notes/24.03.md @@ -0,0 +1,29 @@ +# What's New + +## Neo WebUI + +This version introduces a next-generation WebUI preview with the dark-mode support. +You may toggle the switch in the session list view to use the "neo" session list and launcher, +which provides more streamlined UI. + + +## TUI Installer + +From this release, we ship an open-source version TUI installer as a self-contained single-binary executable. +This installer is packaged using [the science project](https://github.com/a-scie) and provides a terminal-based GUI (aka TUI) for easier setup. 
+ + +## Model Store + +This release introduces a globally shared, predefined "model-store" project from which users may clone model vfolders into their own accounts. + + +## VFolder Trash Bin + +When users delete a vfolder, it is now sent to the trash bin instead of having all its contents removed immediately. +This allows users or admins to undo accidental deletions, and decouples the storage backend's directory removal process from the vfolder management system. + + +## User-defined Image Commit + +When allowed by the administrator, users may commit a running session's main container as a new container image.
diff --git a/docs/release-notes/24.09.md b/docs/release-notes/24.09.md new file mode 100644 index 00000000000..00a8a162320 --- /dev/null +++ b/docs/release-notes/24.09.md @@ -0,0 +1,47 @@ +# What's New + +## Model Serving + +(TODO) + + + +## Model Store + +This release upgrades the model store with a metadata browser and an improved UI. +Users can now import and run a model directly from configured external model repository services such as HuggingFace. + + +## NVIDIA NIM Integration + +If users have a valid license for NVIDIA NIM, they can launch a NIM container with one click as a model service. + + +## Fine-grained Access Control + +(TODO) + + + +## FastTrack + +(TODO) + + + +## Neo WebUI + +This version continues the transition to the next-generation WebUI. +(TODO: more details) + + +
diff --git a/fixtures/manager/example-users.json b/fixtures/manager/example-users.json index 56a0671e5b5..053058df98d 100644 --- a/fixtures/manager/example-users.json +++ b/fixtures/manager/example-users.json @@ -59,7 +59,9 @@ "driver": "static", "driver_opts": {}, "scheduler": "fifo", - "scheduler_opts": {} + "scheduler_opts": {}, + "wsproxy_addr": "http://127.0.0.1:5050", + "wsproxy_api_token": "v625xZLOgbMHhl0s49VuqQ" } ], "sgroups_for_domains": [
diff --git a/pants.toml b/pants.toml index e09405b22b0..89fb660318a 100644 --- a/pants.toml +++ b/pants.toml @@ -36,7 +36,6 @@ root_patterns = [ "/", "/src", "/stubs", - "/tests", "/tools/pants-plugins", ] @@ -53,9 +52,9 @@ enable_resolves = true # * Let other developers do: # - Run `pants export` again # - Update their local IDE/editor's interpreter path configurations -interpreter_constraints = ["CPython==3.12.2"] +interpreter_constraints = ["CPython==3.12.4"] tailor_pex_binary_targets = false -pip_version = "24.0" +pip_version = "24.1.2" [python-bootstrap] search_path = [""] @@ -84,13 +83,13 @@ setuptools = "tools/setuptools.lock" [pex-cli] # Pants 2.21.0 uses Pex 2.3.1 by default.
-# version = "v2.3.0" -# known_versions = [ -# "v2.3.0|macos_arm64|581f7c2d61b4c24c66ba241f2a37d8f3b552f24ed22543279860f3463ac3db35|4124506", -# "v2.3.0|macos_x86_64|581f7c2d61b4c24c66ba241f2a37d8f3b552f24ed22543279860f3463ac3db35|4124506", -# "v2.3.0|linux_arm64|581f7c2d61b4c24c66ba241f2a37d8f3b552f24ed22543279860f3463ac3db35|4124506", -# "v2.3.0|linux_x86_64|581f7c2d61b4c24c66ba241f2a37d8f3b552f24ed22543279860f3463ac3db35|4124506", -# ] +version = "v2.10.0" +known_versions = [ + "v2.10.0|macos_arm64|de2e75c6528009051331e81e57cf05d460d0a8a3411fa9cd0b7b0ffb5d3fc23e|4170525", + "v2.10.0|macos_x86_64|de2e75c6528009051331e81e57cf05d460d0a8a3411fa9cd0b7b0ffb5d3fc23e|4170525", + "v2.10.0|linux_arm64|de2e75c6528009051331e81e57cf05d460d0a8a3411fa9cd0b7b0ffb5d3fc23e|4170525", + "v2.10.0|linux_x86_64|de2e75c6528009051331e81e57cf05d460d0a8a3411fa9cd0b7b0ffb5d3fc23e|4170525", +] # When trying a new pex version, you could find out the hash and size-in-bytes as follows: # $ curl -s -L https://github.com/pantsbuild/pex/releases/download/v2.1.99/pex | tee >(wc -c) >(shasum -a 256) >/dev/null diff --git a/pyproject.toml b/pyproject.toml index 0730b64a812..7e4c19f76e2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -requires-python = "~=3.12.2" +requires-python = "~=3.12.4" [tool.towncrier] package = "ai.backend.manager" # reference point for getting __version__ @@ -106,5 +106,5 @@ implicit_optional = true # FIXME: remove after adding https://github.com/haunts mypy_path = "stubs:src:tools/pants-plugins" namespace_packages = true explicit_package_bases = true -python_executable = "dist/export/python/virtualenvs/python-default/3.12.2/bin/python" +python_executable = "dist/export/python/virtualenvs/python-default/3.12.4/bin/python" disable_error_code = ["typeddict-unknown-key"] diff --git a/python-kernel.lock b/python-kernel.lock index 96793c443a6..36f06e45b02 100644 --- a/python-kernel.lock +++ b/python-kernel.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "attrs~=23.2", @@ -154,13 +154,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "3b7bd22f058434e3b9a7ea4b1500ed47de2713872288c0d511d19926f99b459f", - "url": "https://files.pythonhosted.org/packages/75/6d/d7b55b9c1ac802ab066b3e5015e90faab1fffbbd67a2af498ffc6cc81c97/jupyter_client-8.6.1-py3-none-any.whl" + "hash": "50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f", + "url": "https://files.pythonhosted.org/packages/cf/d3/c4bb02580bc0db807edb9a29b2d0c56031be1ef0d804336deb2699a470f6/jupyter_client-8.6.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "e842515e2bab8e19186d89fdfea7abd15e39dd581f94e399f00e2af5a1652d3f", - "url": "https://files.pythonhosted.org/packages/cd/5a/f4583ba60733b5a1cb1379acdf7c710fb1fb4d9df936a740f8cadf25d853/jupyter_client-8.6.1.tar.gz" + "hash": "2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df", + "url": "https://files.pythonhosted.org/packages/ff/61/3cd51dea7878691919adc34ff6ad180f13bfe25fb8c7662a9ee6dc64e643/jupyter_client-8.6.2.tar.gz" } ], "project_name": "jupyter-client", @@ -178,7 +178,7 @@ "pytest-cov; extra == \"test\"", "pytest-jupyter[client]>=0.4.1; extra == \"test\"", "pytest-timeout; extra == \"test\"", - "pytest; extra == \"test\"", + "pytest<8.2.0; extra == \"test\"", "python-dateutil>=2.8.2", "pyzmq>=23.0", "sphinx-autodoc-typehints; extra == \"docs\"", @@ -189,7 +189,7 @@ "traitlets>=5.3" ], "requires_python": ">=3.8", - 
"version": "8.6.1" + "version": "8.6.2" }, { "artifacts": [ @@ -286,13 +286,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", - "url": "https://files.pythonhosted.org/packages/55/72/4898c44ee9ea6f43396fbc23d9bfaf3d06e01b83698bdf2e4c919deceb7c/platformdirs-4.2.0-py3-none-any.whl" + "hash": "2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", + "url": "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768", - "url": "https://files.pythonhosted.org/packages/96/dc/c1d911bf5bb0fdc58cc05010e9f3efe3b67970cef779ba7fbc3183b987a8/platformdirs-4.2.0.tar.gz" + "hash": "38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", + "url": "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz" } ], "project_name": "platformdirs", @@ -300,6 +300,7 @@ "appdirs==1.4.4; extra == \"test\"", "covdefaults>=2.3; extra == \"test\"", "furo>=2023.9.10; extra == \"docs\"", + "mypy>=1.8; extra == \"type\"", "proselint>=0.13; extra == \"docs\"", "pytest-cov>=4.1; extra == \"test\"", "pytest-mock>=3.12; extra == \"test\"", @@ -308,7 +309,7 @@ "sphinx>=7.2.6; extra == \"docs\"" ], "requires_python": ">=3.8", - "version": "4.2.0" + "version": "4.2.2" }, { "artifacts": [ @@ -495,66 +496,66 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", - "url": "https://files.pythonhosted.org/packages/25/a3/1025f561b87b3cca6f66da149ba7ce4c2bb18d7bd6b84cd5a13a274e9dd3/tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl" + "hash": "a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698", + "url": "https://files.pythonhosted.org/packages/94/d4/f8ac1f5bd22c15fad3b527e025ce219bd526acdbd903f52053df2baecc8b/tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": "e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", - "url": "https://files.pythonhosted.org/packages/0e/76/aca8c8726d045c1c7b093cca3c5551e8df444ef74ba0dfd1f205da1f95db/tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" + "hash": "163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8", + "url": "https://files.pythonhosted.org/packages/00/d9/c33be3c1a7564f7d42d87a8d186371a75fd142097076767a5c27da941fef/tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl" }, { "algorithm": "sha256", - "hash": "27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", - "url": "https://files.pythonhosted.org/packages/34/7a/e7ec972db24513ea69ac7132c1a5eef62309dc978566a4afffa314417a45/tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl" + "hash": "e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4", + "url": "https://files.pythonhosted.org/packages/13/cf/786b8f1e6fe1c7c675e79657448178ad65e41c1c9765ef82e7f6f765c4c5/tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", - "url": "https://files.pythonhosted.org/packages/4a/2e/3ba930e3af171847d610e07ae878e04fcb7e4d7822f1a2d29a27b128ea24/tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl" + "hash": "613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3", + "url": 
"https://files.pythonhosted.org/packages/22/d4/54f9d12668b58336bd30defe0307e6c61589a3e687b05c366f804b7faaf0/tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", - "url": "https://files.pythonhosted.org/packages/62/e5/3ee2ba523a13bae5c17d1658580d13597116c1639374ca5033d58fd04724/tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14", + "url": "https://files.pythonhosted.org/packages/2e/0f/721e113a2fac2f1d7d124b3279a1da4c77622e104084f56119875019ffab/tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl" }, { "algorithm": "sha256", - "hash": "fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2", - "url": "https://files.pythonhosted.org/packages/66/e5/466aa544e0cbae9b0ece79cd42db257fa7bfa3197c853e3f7921b3963190/tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl" + "hash": "454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4", + "url": "https://files.pythonhosted.org/packages/71/63/c8fc62745e669ac9009044b889fc531b6f88ac0f5f183cac79eaa950bb23/tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl" }, { "algorithm": "sha256", - "hash": "f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", - "url": "https://files.pythonhosted.org/packages/9f/12/11d0a757bb67278d3380d41955ae98527d5ad18330b2edbdc8de222b569b/tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f", + "url": "https://files.pythonhosted.org/packages/cf/3f/2c792e7afa7dd8b24fad7a2ed3c2f24a5ec5110c7b43a64cb6095cc106b8/tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", - "url": "https://files.pythonhosted.org/packages/bd/a2/ea124343e3b8dd7712561fe56c4f92eda26865f5e1040b289203729186f2/tornado-6.4.tar.gz" + "hash": "8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842", + "url": "https://files.pythonhosted.org/packages/e4/8e/a6ce4b8d5935558828b0f30f3afcb2d980566718837b3365d98e34f6067e/tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" }, { "algorithm": "sha256", - "hash": "88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", - "url": "https://files.pythonhosted.org/packages/e2/40/bcf0af5a29a850bf5ad7f79ef51c054f99e18d9cdf4efd6eeb0df819641f/tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl" + "hash": "92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9", + "url": "https://files.pythonhosted.org/packages/ee/66/398ac7167f1c7835406888a386f6d0d26ee5dbf197d8a571300be57662d3/tornado-6.4.1.tar.gz" } ], "project_name": "tornado", "requires_dists": [], "requires_python": ">=3.8", - "version": "6.4" + "version": "6.4.1" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "fcdf85684a772ddeba87db2f398ce00b40ff550d1528c03c14dbf6a02003cd80", - "url": "https://files.pythonhosted.org/packages/7c/c4/366a09036c07f46eb8c9b2af39c97f502ef24f11f2a6e4d763655d9f2708/traitlets-5.14.2-py3-none-any.whl" + "hash": "b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", + "url": "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": 
"8cdd83c040dab7d1dee822678e5f5d100b514f7b72b01615b26fc5718916fdf9", - "url": "https://files.pythonhosted.org/packages/4f/97/d957b3a5f6da825cbbb6a02e584bcab769ea2c2a9ad67a9cc25b4bbafb30/traitlets-5.14.2.tar.gz" + "hash": "9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", + "url": "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz" } ], "project_name": "traitlets", @@ -566,11 +567,11 @@ "pydata-sphinx-theme; extra == \"docs\"", "pytest-mock; extra == \"test\"", "pytest-mypy-testing; extra == \"test\"", - "pytest<8.1,>=7.0; extra == \"test\"", + "pytest<8.2,>=7.0; extra == \"test\"", "sphinx; extra == \"docs\"" ], "requires_python": ">=3.8", - "version": "5.14.2" + "version": "5.14.3" }, { "artifacts": [ @@ -594,19 +595,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a", - "url": "https://files.pythonhosted.org/packages/01/f3/936e209267d6ef7510322191003885de524fc48d1b43269810cd589ceaf5/typing_extensions-4.11.0-py3-none-any.whl" + "hash": "04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", + "url": "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0", - "url": "https://files.pythonhosted.org/packages/f6/f3/b827b3ab53b4e3d8513914586dcca61c355fa2ce8252dea4da56e67bf8f2/typing_extensions-4.11.0.tar.gz" + "hash": "1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", + "url": "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz" } ], "project_name": "typing-extensions", "requires_dists": [], "requires_python": ">=3.8", - "version": "4.11.0" + "version": "4.12.2" }, { "artifacts": [ @@ -670,7 +671,7 @@ "only_builds": [], "only_wheels": [], "path_mappings": {}, - "pex_version": "2.3.0", + "pex_version": "2.3.1", "pip_version": "24.0", "prefer_older_binary": false, "requirements": [ @@ -684,7 +685,7 @@ "uvloop~=0.19" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/python.lock b/python.lock index 69a42e54cd8..1687c624269 100644 --- a/python.lock +++ b/python.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "Jinja2~=3.1.2", @@ -15,9 +15,10 @@ // "SQLAlchemy[postgresql_asyncpg]~=1.4.40", // "aiodataloader-ng~=0.2.1", // "aiodns>=3.0", -// "aiodocker~=0.21.0", +// "aiodocker==0.22.1", // "aiofiles~=23.2.1", // "aiohttp_cors~=0.7", +// "aiohttp_jinja2~=1.6", // "aiohttp_sse>=2.0", // "aiohttp~=3.9.1", // "aiomonitor~=0.7.0", @@ -52,7 +53,7 @@ // "hiredis>=2.2.3", // "humanize>=3.1.0", // "ifaddr~=0.2", -// "inquirer~=2.9.2", +// "inquirer~=3.3.0", // "janus~=1.0.0", // "jupyter-client>=6.0", // "kubernetes-asyncio~=9.1.0", @@ -68,6 +69,7 @@ // "psutil~=5.9.1", // "pycryptodome>=3.14.1", // "pydantic~=2.6.4", +// "pyhumps~=3.8.0", // "pytest-dependency>=0.5.1", // "pytest>=7.3.1", // "python-dateutil>=2.8", @@ -77,6 +79,7 @@ // "redis[hiredis]==4.5.5", // "rich~=13.6", // "setproctitle~=1.3.2", +// "setuptools~=70.3.0", // "tabulate~=0.8.9", // "temporenc~=0.1.0", // "tenacity>=8.0", @@ -114,6 +117,7 @@ "allow_wheels": true, 
"build_isolation": true, "constraints": [], + "excluded": [], "locked_resolves": [ { "locked_requirements": [ @@ -121,19 +125,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "ee799435f77e8c3a2a7207c465feae2343a2aa537c38e8f56b629c8a321a02d0", - "url": "https://files.pythonhosted.org/packages/f7/39/b392dc1a8bb58342deacc1ed2b00edf88fd357e6fdf76cc6c8046825f84f/aioconsole-0.7.0-py3-none-any.whl" + "hash": "1867a7cc86897a87398e6e6fba302738548f1cf76cbc6c76e06338e091113bdc", + "url": "https://files.pythonhosted.org/packages/08/17/98008117ec3f484259f11a8a96cb5601949546a4de43102b99cffa1138e5/aioconsole-0.7.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "c702d24406378d37d9873f91e03ce71520bac503d5ab03f81d8b563ff010bd54", - "url": "https://files.pythonhosted.org/packages/85/da/6a238a72274fa338b2ff20007f026944a6721245fa65d3bd4adeb83be419/aioconsole-0.7.0.tar.gz" + "hash": "a3e52428d32623c96746ec3862d97483c61c12a2f2dfba618886b709415d4533", + "url": "https://files.pythonhosted.org/packages/e5/f4/f156826819b4136b3fe9fac1b7707f6f241c871aaef13b4a16932e39156d/aioconsole-0.7.1.tar.gz" } ], "project_name": "aioconsole", "requires_dists": [], "requires_python": ">=3.8", - "version": "0.7.0" + "version": "0.7.1" }, { "artifacts": [ @@ -187,22 +191,40 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "6fe00135bb7dc40a407669d3157ecdfd856f3737d939df54f40a479d40cf7bdc", - "url": "https://files.pythonhosted.org/packages/7e/86/97638ef9d0e54a86d389ded8ccf27cc1ecabf7ce27ae873636a5c1e46d89/aiodocker-0.21.0-py3-none-any.whl" + "hash": "4e42d6e6cbf8f2afb431b77208be0f7e81d07eb38c8176f18d58761115b2acf5", + "url": "https://files.pythonhosted.org/packages/55/f6/48a88b2aaf6a3ecade21467770f17bca5716b7cb5446e8d41bde6d300a4a/aiodocker-0.22.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "1f2e6db6377195962bb676d4822f6e3a0c525e1b5d60b8ebbab68230bff3d227", - "url": "https://files.pythonhosted.org/packages/6f/f5/5fb3a17fcdd31d3cce9afa82c306da869e2b36c5ca1477224396e5e1f31b/aiodocker-0.21.0.tar.gz" + "hash": "fb969fdf2ac574d800ddf132940e0337fe459e191b8024ef66ec8328effa63ce", + "url": "https://files.pythonhosted.org/packages/55/5e/9e61e7604ac586c9f0b0fa7f8b6a0c25e8b15c9478ca55969cc43f258167/aiodocker-0.22.1.tar.gz" } ], "project_name": "aiodocker", "requires_dists": [ - "aiohttp>=3.6", - "typing-extensions>=3.6.5" + "aiohttp==3.9.5; extra == \"ci\"", + "aiohttp>=3.8", + "alabaster==0.7.16; extra == \"doc\"", + "async-timeout==4.0.3; extra == \"ci\"", + "codecov==2.1.13; extra == \"dev\"", + "multidict==6.0.5; extra == \"ci\"", + "mypy==1.10.1; extra == \"dev\"", + "packaging==24.1; extra == \"dev\"", + "pre-commit>=3.5.0; extra == \"dev\"", + "pytest-asyncio==0.23.7; extra == \"dev\"", + "pytest-cov==5.0.0; extra == \"dev\"", + "pytest-sugar==1.0.0; extra == \"dev\"", + "pytest==8.2.2; extra == \"dev\"", + "ruff-lsp==0.0.54; extra == \"dev\"", + "ruff==0.5.0; extra == \"dev\"", + "sphinx-autodoc-typehints==2.2.2; extra == \"doc\"", + "sphinx==7.3.7; extra == \"doc\"", + "sphinxcontrib-asyncio==0.3.0; extra == \"doc\"", + "towncrier==23.11.0; extra == \"dev\"", + "yarl==1.9.4; extra == \"ci\"" ], - "requires_python": ">=3.6", - "version": "0.21.0" + "requires_python": ">=3.8.0", + "version": "0.22.1" }, { "artifacts": [ @@ -331,6 +353,27 @@ "requires_python": null, "version": "0.7.0" }, + { + "artifacts": [ + { + "algorithm": "sha256", + "hash": "0df405ee6ad1b58e5a068a105407dc7dcc1704544c559f1938babde954f945c7", + "url": 
"https://files.pythonhosted.org/packages/eb/90/65238d4246307195411b87a07d03539049819b022c01bcc773826f600138/aiohttp_jinja2-1.6-py3-none-any.whl" + }, + { + "algorithm": "sha256", + "hash": "a3a7ff5264e5bca52e8ae547bbfd0761b72495230d438d05b6c0915be619b0e2", + "url": "https://files.pythonhosted.org/packages/e6/39/da5a94dd89b1af7241fb7fc99ae4e73505b5f898b540b6aba6dc7afe600e/aiohttp-jinja2-1.6.tar.gz" + } + ], + "project_name": "aiohttp-jinja2", + "requires_dists": [ + "aiohttp>=3.9.0", + "jinja2>=3.0.0" + ], + "requires_python": ">=3.8", + "version": "1.6" + }, { "artifacts": [ { @@ -523,13 +566,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "2edcc97bed0bd3272611ce3a98d98279e9c209e7186e43e75bbb1b2bdfdbcc43", - "url": "https://files.pythonhosted.org/packages/7f/50/9fb3a5c80df6eb6516693270621676980acd6d5a9a7efdbfa273f8d616c7/alembic-1.13.1-py3-none-any.whl" + "hash": "6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953", + "url": "https://files.pythonhosted.org/packages/df/ed/c884465c33c25451e4a5cd4acad154c29e5341e3214e220e7f3478aa4b0d/alembic-1.13.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "4932c8558bf68f2ee92b9bbcb8218671c627064d5b08939437af6d77dc05e595", - "url": "https://files.pythonhosted.org/packages/7b/24/ddce068e2ac9b5581bd58602edb2a1be1b0752e1ff2963c696ecdbe0470d/alembic-1.13.1.tar.gz" + "hash": "1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef", + "url": "https://files.pythonhosted.org/packages/66/e2/efa88e86029cada2da5941ec664d50d9a3b2a91f5066405c6f90e5016c16/alembic-1.13.2.tar.gz" } ], "project_name": "alembic", @@ -542,7 +585,7 @@ "typing-extensions>=4" ], "requires_python": ">=3.8", - "version": "1.13.1" + "version": "1.13.2" }, { "artifacts": [ @@ -573,13 +616,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43", - "url": "https://files.pythonhosted.org/packages/28/78/d31230046e58c207284c6b2c4e8d96e6d3cb4e52354721b944d3e1ee4aa5/annotated_types-0.6.0-py3-none-any.whl" + "hash": "1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", + "url": "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d", - "url": "https://files.pythonhosted.org/packages/67/fe/8c7b275824c6d2cd17c93ee85d0ee81c090285b6d52f4876ccc47cf9c3c4/annotated_types-0.6.0.tar.gz" + "hash": "aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", + "url": "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz" } ], "project_name": "annotated-types", @@ -587,7 +630,7 @@ "typing-extensions>=4.0.0; python_version < \"3.9\"" ], "requires_python": ">=3.8", - "version": "0.6.0" + "version": "0.7.0" }, { "artifacts": [ @@ -817,98 +860,98 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a", - "url": "https://files.pythonhosted.org/packages/85/23/756228cbc426049c264c86d163ec1b4fb1b06114f26b25fb63132af56126/bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl" + "hash": "6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d", + "url": "https://files.pythonhosted.org/packages/9c/64/a016d23b6f513282d8b7f9dd91342929a2e970b2e2c2576d9b76f8f2ee5a/bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": 
"ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1", - "url": "https://files.pythonhosted.org/packages/05/76/6232380b99d85a2154ae06966b4bf6ce805878a7a92c3211295063b0b6be/bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl" + "hash": "8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d", + "url": "https://files.pythonhosted.org/packages/0f/e8/183ead5dd8124e463d0946dfaf86c658225adde036aede8384d21d1794d0/bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl" }, { "algorithm": "sha256", - "hash": "1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5", - "url": "https://files.pythonhosted.org/packages/21/d9/7924b194b3aa9bcc39f4592470995841efe71015cb8a79abae9bb043ec28/bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl" + "hash": "ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84", + "url": "https://files.pythonhosted.org/packages/12/d4/13b86b1bb2969a804c2347d0ad72fc3d3d9f5cf0d876c84451c6480e19bc/bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1", - "url": "https://files.pythonhosted.org/packages/22/2e/32c1810b8470aca98c33892fc8c559c1be95eba711cb1bb82fbbf2a4752a/bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73", + "url": "https://files.pythonhosted.org/packages/23/85/283450ee672719e216a5e1b0e80cb0c8f225bc0814cbb893155ee4fdbb9e/bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326", - "url": "https://files.pythonhosted.org/packages/41/ed/e446078ebe94d8ccac7170ff4bab83d8c86458c6fcfc7c5a4b449974fdd6/bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08", + "url": "https://files.pythonhosted.org/packages/29/3c/6e478265f68eff764571676c0773086d15378fdf5347ddf53e5025c8b956/bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl" }, { "algorithm": "sha256", - "hash": "2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb", - "url": "https://files.pythonhosted.org/packages/42/9d/a88027b5a8752f4b1831d957470f48e23cebc112aaf762880f3adbfba9cf/bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl" + "hash": "31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834", + "url": "https://files.pythonhosted.org/packages/2c/fd/0d2d7cc6fc816010f6c6273b778e2f147e2eca1144975b6b71e344b26ca0/bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl" }, { "algorithm": "sha256", - "hash": "68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483", - "url": "https://files.pythonhosted.org/packages/42/c4/13c4bba7e25633b2e94724c642aa93ce376c476d80ecd50d73f0fe2eb38f/bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286", + "url": "https://files.pythonhosted.org/packages/2d/5e/edcb4ec57b056ca9d5f9fde31fcda10cc635def48867edff5cc09a348a4f/bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2", - "url": "https://files.pythonhosted.org/packages/54/fc/fd9a299d4dfd7da38b4570e487ea2465fb92021ab31a08bd66b3caba0baa/bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl" + "hash": "3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a", + "url": 
"https://files.pythonhosted.org/packages/2f/f6/9c0a6de7ef78d573e10d0b7de3ef82454e2e6eb6fada453ea6c2b8fb3f0a/bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c", - "url": "https://files.pythonhosted.org/packages/5a/5b/dfcd8b7422a8f3b4ce3d28d64307e2f3502e3b5c540dde35eccda2d6c763/bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl" + "hash": "01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64", + "url": "https://files.pythonhosted.org/packages/3b/5d/121130cc85009070fe4e4f5937b213a00db143147bc6c8677b3fd03deec8/bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": "eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c", - "url": "https://files.pythonhosted.org/packages/6d/7c/761ab4586beb7aa14b3fa2f382794746a218fffe1d22d9e10926200c8ccd/bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl" + "hash": "4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3", + "url": "https://files.pythonhosted.org/packages/4c/6a/ce950d4350c734bc5d9b7196a58fedbdc94f564c00b495a1222984431e03/bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl" }, { "algorithm": "sha256", - "hash": "33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258", - "url": "https://files.pythonhosted.org/packages/72/07/6a6f2047a9dc9d012b7b977e4041d37d078b76b44b7ee4daf331c1e6fb35/bcrypt-4.1.2.tar.gz" + "hash": "094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05", + "url": "https://files.pythonhosted.org/packages/63/56/45312e49c195cd30e1bf4b7f0e039e8b3c46802cd55485947ddcb96caa27/bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl" }, { "algorithm": "sha256", - "hash": "387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc", - "url": "https://files.pythonhosted.org/packages/72/3d/925adb5f5bef7616b504227a431fcaadd9630044802b5c81a31a560b4369/bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455", + "url": "https://files.pythonhosted.org/packages/7c/8d/ad2efe0ec57ed3c25e588c4543d946a1c72f8ee357a121c0e382d8aaa93f/bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c", - "url": "https://files.pythonhosted.org/packages/88/fd/6025f5530e6ac2513404aa2ab3fb935b9d992dbf24f255f03b5972dace74/bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl" + "hash": "5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611", + "url": "https://files.pythonhosted.org/packages/97/00/21e34b365b895e6faf9cc5d4e7b97dd419e08f8a7df119792ec206b4a3fa/bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl" }, { "algorithm": "sha256", - "hash": "6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966", - "url": "https://files.pythonhosted.org/packages/91/21/6350647549656138a067788d67bdb5ee89ffc2f025618ebf60d3806274c4/bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl" + "hash": "f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a", + "url": "https://files.pythonhosted.org/packages/a4/9a/4aa31d1de9369737cfa734a60c3d125ecbd1b3ae2c6499986d0ac160ea8b/bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63", - "url": 
"https://files.pythonhosted.org/packages/a4/72/a1276d2fbf5d1af0e29ff9fb5220ce1d49a5f94ccbfb4f9141c963ff9d0e/bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl" + "hash": "0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c", + "url": "https://files.pythonhosted.org/packages/a8/eb/fbea8d2b370a4cc7f5f0aff9f492177a5813e130edeab9dd388ddd3ef1dc/bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl" }, { "algorithm": "sha256", - "hash": "3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4", - "url": "https://files.pythonhosted.org/packages/ac/c5/243674ec98288af9da31f5b137686746986d5d298dc520e243032160fd1b/bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl" + "hash": "193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15", + "url": "https://files.pythonhosted.org/packages/af/a1/36aa84027ef45558b30a485bc5b0606d5e7357b27b10cc49dce3944f4d1d/bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl" }, { "algorithm": "sha256", - "hash": "f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7", - "url": "https://files.pythonhosted.org/packages/b6/1b/1c1cf4efe142dfe6fab912c16766d3eab65b87f33f1d13a08238afce5fdf/bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl" + "hash": "2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623", + "url": "https://files.pythonhosted.org/packages/ca/e9/0b36987abbcd8c9210c7b86673d88ff0a481b4610630710fb80ba5661356/bcrypt-4.1.3.tar.gz" }, { "algorithm": "sha256", - "hash": "b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0", - "url": "https://files.pythonhosted.org/packages/bf/26/ec53ccf5cadc81891d53cf0c117cff0f973d98cab6e9d6979578ca5aceeb/bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl" + "hash": "c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6", + "url": "https://files.pythonhosted.org/packages/e0/c9/069b0c3683ce969b328b7b3e3218f9d5981d0629f6091b3b1dfa72928f75/bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl" }, { "algorithm": "sha256", - "hash": "ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e", - "url": "https://files.pythonhosted.org/packages/df/cc/5a73c2ecfa9f255423530e8aeaceb0590da12e4c83c99fdac17093f5ce42/bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl" + "hash": "48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74", + "url": "https://files.pythonhosted.org/packages/fe/4e/e424a74f0749998d8465c162c5cb9d9f210a5b60444f4120eff0af3fa800/bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl" } ], "project_name": "bcrypt", @@ -917,7 +960,7 @@ "pytest!=3.3.0,>=3.2.1; extra == \"tests\"" ], "requires_python": ">=3.7", - "version": "4.1.2" + "version": "4.1.3" }, { "artifacts": [ @@ -947,48 +990,48 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "db7bbb1c6059e99b74dcf634e497b04addcac4c527ae2b2696e47c39eccc6c50", - "url": "https://files.pythonhosted.org/packages/4b/bb/f3a77166e1917b4269f13752edbabbd8aa022a442f51e4779247fb9a1253/boto3-1.34.92-py3-none-any.whl" + "hash": "b8433d481d50b68a0162c0379c0dd4aabfc3d1ad901800beb5b87815997511c1", + "url": "https://files.pythonhosted.org/packages/9a/b0/a4301290ea6cdbb0cda7048ae11b0e560eacca7d2c2e64e6b3d5a9fb3fde/boto3-1.34.144-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "684cba753d64978a486e8ea9645d53de0d4e3b4a3ab1495b26bd04b9541cea2d", - "url": "https://files.pythonhosted.org/packages/c5/d2/57a90069ab4de1780a16d1a670c14098a417b676ab14724402a32feb695a/boto3-1.34.92.tar.gz" + "hash": "2f3e88b10b8fcc5f6100a9d74cd28230edc9d4fa226d99dd40a3ab38ac213673", + "url": 
"https://files.pythonhosted.org/packages/42/e5/738f7bf96f4f5597c8393e11be2c28bef5f876b5635c1ea9d86888e59657/boto3-1.34.144.tar.gz" } ], "project_name": "boto3", "requires_dists": [ - "botocore<1.35.0,>=1.34.92", + "botocore<1.35.0,>=1.34.144", "botocore[crt]<2.0a0,>=1.21.0; extra == \"crt\"", "jmespath<2.0.0,>=0.7.1", "s3transfer<0.11.0,>=0.10.0" ], "requires_python": ">=3.8", - "version": "1.34.92" + "version": "1.34.144" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "4211a22a1f6c6935e70cbb84c2cd93b29f9723eaf5036d59748dd104f389a681", - "url": "https://files.pythonhosted.org/packages/c4/58/c25d117142140b65f0782bc066b4b52a84454075e4e575bf37d1c7fb0ad9/botocore-1.34.92-py3-none-any.whl" + "hash": "a2cf26e1bf10d5917a2285e50257bc44e94a1d16574f282f3274f7a5d8d1f08b", + "url": "https://files.pythonhosted.org/packages/24/4b/956a80d406dfffba1f8f7fbaba7dd73d418ed8a7b95faa1ade7cf17663a5/botocore-1.34.144-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "d1ca4886271f184445ec737cd2e752498648cca383887c5a37b2e01c8ab94039", - "url": "https://files.pythonhosted.org/packages/d5/d0/e263194220495a7a61cad619100db16128bd0a3ab1ea73f1e540905ca29a/botocore-1.34.92.tar.gz" + "hash": "4215db28d25309d59c99507f1f77df9089e5bebbad35f6e19c7c44ec5383a3e8", + "url": "https://files.pythonhosted.org/packages/8c/66/01d63edf404b2ef2c5594701565ac0c031ce7253231298d423e2514566b8/botocore-1.34.144.tar.gz" } ], "project_name": "botocore", "requires_dists": [ - "awscrt==0.20.9; extra == \"crt\"", + "awscrt==0.20.11; extra == \"crt\"", "jmespath<2.0.0,>=0.7.1", "python-dateutil<3.0.0,>=2.1", "urllib3!=2.2.0,<3,>=1.25.4; python_version >= \"3.10\"", "urllib3<1.27,>=1.25.4; python_version < \"3.10\"" ], "requires_python": ">=3.8", - "version": "1.34.92" + "version": "1.34.144" }, { "artifacts": [ @@ -1086,19 +1129,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1", - "url": "https://files.pythonhosted.org/packages/ba/06/a07f096c664aeb9f01624f858c3add0a4e913d6c96257acb4fce61e7de14/certifi-2024.2.2-py3-none-any.whl" + "hash": "c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90", + "url": "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", - "url": "https://files.pythonhosted.org/packages/71/da/e94e26401b62acd6d91df2b52954aceb7f561743aa5ccc32152886c76c96/certifi-2024.2.2.tar.gz" + "hash": "5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", + "url": "https://files.pythonhosted.org/packages/c2/02/a95f2b11e207f68bc64d7aae9666fed2e2b3f307748d5123dffb72a1bbea/certifi-2024.7.4.tar.gz" } ], "project_name": "certifi", "requires_dists": [], "requires_python": ">=3.6", - "version": "2024.2.2" + "version": "2024.7.4" }, { "artifacts": [ @@ -1302,103 +1345,103 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30", - "url": "https://files.pythonhosted.org/packages/ca/2e/9f2c49bd6a18d46c05ec098b040e7d4599c61f50ced40a39adfae3f68306/cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl" + "hash": "b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14", + "url": "https://files.pythonhosted.org/packages/fd/2b/be327b580645927bb1a1f32d5a175b897a9b956bc085b095e15c40bac9ed/cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": 
"7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc", - "url": "https://files.pythonhosted.org/packages/0e/1d/62a2324882c0db89f64358dadfb95cae024ee3ba9fde3d5fd4d2f58af9f5/cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl" + "hash": "6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9", + "url": "https://files.pythonhosted.org/packages/07/40/d6f6819c62e808ea74639c3c640f7edd636b86cce62cb14943996a15df92/cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl" }, { "algorithm": "sha256", - "hash": "6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1", - "url": "https://files.pythonhosted.org/packages/13/9e/a55763a32d340d7b06d045753c186b690e7d88780cafce5f88cb931536be/cryptography-42.0.5.tar.gz" + "hash": "cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961", + "url": "https://files.pythonhosted.org/packages/0f/38/85c74d0ac4c540780e072b1e6f148ecb718418c1062edcb20d22f3ec5bbb/cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl" }, { "algorithm": "sha256", - "hash": "2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da", - "url": "https://files.pythonhosted.org/packages/2c/9c/821ef6144daf80360cf6093520bf07eec7c793103ed4b1bf3fa17d2b55d8/cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl" + "hash": "ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7", + "url": "https://files.pythonhosted.org/packages/25/c9/86f04e150c5d5d5e4a731a2c1e0e43da84d901f388e3fea3d5de98d689a7/cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl" }, { "algorithm": "sha256", - "hash": "cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a", - "url": "https://files.pythonhosted.org/packages/48/c8/c0962598c43d3cff2c9d6ac66d0c612bdfb1975be8d87b8889960cf8c81d/cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl" + "hash": "e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801", + "url": "https://files.pythonhosted.org/packages/35/66/2d87e9ca95c82c7ee5f2c09716fc4c4242c1ae6647b9bd27e55e920e9f10/cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1", - "url": "https://files.pythonhosted.org/packages/50/26/248cd8b6809635ed412159791c0d3869d8ec9dfdc57d428d500a14d425b7/cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e", + "url": "https://files.pythonhosted.org/packages/43/c2/4a3eef67e009a522711ebd8ac89424c3a7fe591ece7035d964419ad52a1d/cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1", - "url": "https://files.pythonhosted.org/packages/5b/3d/c3c21e3afaf43bacccc3ebf61d1a0d47cef6e2607dbba01662f6f9d8fc40/cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl" + "hash": "a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70", + "url": "https://files.pythonhosted.org/packages/49/1c/9f6d13cc8041c05eebff1154e4e71bedd1db8e174fff999054435994187a/cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7", - "url": "https://files.pythonhosted.org/packages/64/f7/d3c83c79947cc6807e6acd3b2d9a1cbd312042777bc7eec50c869913df79/cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl" + "hash": 
"2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b", + "url": "https://files.pythonhosted.org/packages/53/c2/903014dafb7271fb148887d4355b2e90319cad6e810663be622b0c933fc9/cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7", - "url": "https://files.pythonhosted.org/packages/69/f6/630eb71f246208103ffee754b8375b6b334eeedb28620b3ae57be815eeeb/cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl" + "hash": "2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583", + "url": "https://files.pythonhosted.org/packages/5c/46/de71d48abf2b6d3c808f4fbb0f4dc44a4e72786be23df0541aa2a3f6fd7e/cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl" }, { "algorithm": "sha256", - "hash": "5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8", - "url": "https://files.pythonhosted.org/packages/6d/4d/f7c14c7a49e35df829e04d451a57b843208be7442c8e087250c195775be1/cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl" + "hash": "e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902", + "url": "https://files.pythonhosted.org/packages/5d/32/f6326c70a9f0f258a201d3b2632bca586ea24d214cec3cf36e374040e273/cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922", - "url": "https://files.pythonhosted.org/packages/7d/bc/b6c691c960b5dcd54c5444e73af7f826e62af965ba59b6d7e9928b6489a2/cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl" + "hash": "dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c", + "url": "https://files.pythonhosted.org/packages/5f/f9/c3d4f19b82bdb25a3d857fe96e7e571c981810e47e3f299cc13ac429066a/cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl" }, { "algorithm": "sha256", - "hash": "b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278", - "url": "https://files.pythonhosted.org/packages/8c/50/9185cca136596448d9cc595ae22a9bd4412ad35d812550c37c1390d54673/cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl" + "hash": "dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28", + "url": "https://files.pythonhosted.org/packages/60/12/f064af29190cdb1d38fe07f3db6126091639e1dece7ec77c4ff037d49193/cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl" }, { "algorithm": "sha256", - "hash": "3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc", - "url": "https://files.pythonhosted.org/packages/c2/40/c7cb9d6819b90640ffc3c4028b28f46edc525feaeaa0d98ea23e843d446d/cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl" + "hash": "31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1", + "url": "https://files.pythonhosted.org/packages/89/f4/a8b982e88eb5350407ebdbf4717b55043271d878705329e107f4783555f2/cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16", - "url": "https://files.pythonhosted.org/packages/d1/f1/fd98e6e79242d9aeaf6a5d49639a7e85f05741575af14d3f4a1d477f572e/cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl" + "hash": "8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2", + "url": "https://files.pythonhosted.org/packages/93/a7/1498799a2ea06148463a9a2c10ab2f6a921a74fb19e231b27dc412a748e2/cryptography-42.0.8.tar.gz" }, { "algorithm": "sha256", - "hash": "7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e", - "url": 
"https://files.pythonhosted.org/packages/d4/fa/057f9d7a5364c86ccb6a4bd4e5c58920dcb66532be0cc21da3f9c7617ec3/cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7", + "url": "https://files.pythonhosted.org/packages/95/26/82d704d988a193cbdc69ac3b41c687c36eaed1642cce52530ad810c35645/cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": "16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d", - "url": "https://files.pythonhosted.org/packages/d8/b1/127ecb373d02db85a7a7de5093d7ac7b7714b8907d631f0591e8f002998d/cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl" + "hash": "fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e", + "url": "https://files.pythonhosted.org/packages/a2/68/e16751f6b859bc120f53fddbf3ebada5c34f0e9689d8af32884d8b2e4b4c/cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl" }, { "algorithm": "sha256", - "hash": "b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec", - "url": "https://files.pythonhosted.org/packages/d9/f9/27dda069a9f9bfda7c75305e222d904cc2445acf5eab5c696ade57d36f1b/cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl" + "hash": "5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949", + "url": "https://files.pythonhosted.org/packages/c2/de/8083fa2e68d403553a01a9323f4f8b9d7ffed09928ba25635c29fb28c1e7/cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl" }, { "algorithm": "sha256", - "hash": "2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb", - "url": "https://files.pythonhosted.org/packages/e2/59/61b2364f2a4d3668d933531bc30d012b9b2de1e534df4805678471287d57/cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e", + "url": "https://files.pythonhosted.org/packages/f9/8b/1b929ba8139430e09e140e6939c2b29c18df1f2fc2149e41bdbdcdaf5d1f/cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl" }, { "algorithm": "sha256", - "hash": "0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee", - "url": "https://files.pythonhosted.org/packages/e5/61/67e090a41c70ee526bd5121b1ccabab85c727574332d03326baaedea962d/cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl" + "hash": "961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d", + "url": "https://files.pythonhosted.org/packages/fa/5d/31d833daa800e4fab33209843095df7adb4a78ea536929145534cbc15026/cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl" }, { "algorithm": "sha256", - "hash": "329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4", - "url": "https://files.pythonhosted.org/packages/fb/0b/14509319a1b49858425553d2fb3808579cfdfe98c1d71a3f046c1b4e0108/cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7", + "url": "https://files.pythonhosted.org/packages/fa/e2/b7e6e8c261536c489d9cf908769880d94bd5d9a187e166b0dc838d2e6a56/cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl" } ], "project_name": "cryptography", @@ -1425,7 +1468,7 @@ "sphinxcontrib-spelling>=4.0.1; extra == \"docstest\"" ], "requires_python": ">=3.7", - "version": "42.0.5" + "version": "42.0.8" }, { "artifacts": [ @@ -1448,6 +1491,27 @@ "requires_python": "<3.13,>=3.7", "version": "0.5.14" }, + { + "artifacts": [ + { + "algorithm": "sha256", + "hash": 
"e818e6913f26c2a81eadef503a2741d7cca7f235d20e217274a009ecd5a74abf", + "url": "https://files.pythonhosted.org/packages/1b/c2/4bc8cd09b14e28ce3f406a8b05761bed0d785d1ca8c2a5c6684d884c66a2/editor-1.6.6-py3-none-any.whl" + }, + { + "algorithm": "sha256", + "hash": "bb6989e872638cd119db9a4fce284cd8e13c553886a1c044c6b8d8a160c871f8", + "url": "https://files.pythonhosted.org/packages/2a/92/734a4ab345914259cb6146fd36512608ea42be16195375c379046f33283d/editor-1.6.6.tar.gz" + } + ], + "project_name": "editor", + "requires_dists": [ + "runs", + "xmod" + ], + "requires_python": ">=3.8", + "version": "1.6.6" + }, { "artifacts": [ { @@ -1618,13 +1682,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415", - "url": "https://files.pythonhosted.org/packages/9e/8d/ddbcf81ec751d8ee5fd18ac11ff38a0e110f39dfbf105e6d9db69d556dd0/google_auth-2.29.0-py2.py3-none-any.whl" + "hash": "53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b", + "url": "https://files.pythonhosted.org/packages/e7/00/85c22f7f73fa2e88dfbf0e1f63c565386ba40e0264b59c8a4362ae27c9fc/google_auth-2.32.0-py2.py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360", - "url": "https://files.pythonhosted.org/packages/18/b2/f14129111cfd61793609643a07ecb03651a71dd65c6974f63b0310ff4b45/google-auth-2.29.0.tar.gz" + "hash": "49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022", + "url": "https://files.pythonhosted.org/packages/8c/a3/cc4dc2e8a7f67012a26dec5b6b1fdf5261b12e7d54981c88de2580315726/google_auth-2.32.0.tar.gz" } ], "project_name": "google-auth", @@ -1642,7 +1706,7 @@ "rsa<5,>=3.1.4" ], "requires_python": ">=3.7", - "version": "2.29.0" + "version": "2.32.0" }, { "artifacts": [ @@ -2016,13 +2080,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "ce284a76d5b1377fd8836733b983bfb0b76f1aa1c090de2566fcf008d7f6ab16", - "url": "https://files.pythonhosted.org/packages/aa/2b/2ae0c789fd08d5b44e745726d08a17e6d3d7d09071d05473105edc7615f2/humanize-4.9.0-py3-none-any.whl" + "hash": "39e7ccb96923e732b5c2e27aeaa3b10a8dfeeba3eb965ba7b74a3eb0e30040a6", + "url": "https://files.pythonhosted.org/packages/8f/49/a29c79bea335e52fb512a43faf84998c184c87fef82c65f568f8c56f2642/humanize-4.10.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "582a265c931c683a7e9b8ed9559089dea7edcf6cc95be39a3cbc2c5d5ac2bcfa", - "url": "https://files.pythonhosted.org/packages/76/21/7a0b24fae849562397efd79da58e62437243ae0fd0f6c09c6bc26225b75c/humanize-4.9.0.tar.gz" + "hash": "06b6eb0293e4b85e8d385397c5868926820db32b9b654b932f57fa41c23c9978", + "url": "https://files.pythonhosted.org/packages/5d/b1/c8f05d5dc8f64030d8cc71e91307c1daadf6ec0d70bcd6eabdfd9b6f153f/humanize-4.10.0.tar.gz" } ], "project_name": "humanize", @@ -2032,7 +2096,7 @@ "pytest; extra == \"tests\"" ], "requires_python": ">=3.8", - "version": "4.9.0" + "version": "4.10.0" }, { "artifacts": [ @@ -2092,23 +2156,23 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "ea44fe074e878e74af05b6902314d7d7b8ff5d2db5259bcb37d6c5ec9b75afa6", - "url": "https://files.pythonhosted.org/packages/c7/cf/9c3a415723c9fe6e876d37054a95a17e1a3cbb279ab4e6f7770b11b422af/inquirer-2.9.2-py3-none-any.whl" + "hash": "c4be527e8c4e7a1b2c909aa064ef6f1a4466be42224290f21f07f6d5947171f4", + "url": "https://files.pythonhosted.org/packages/b5/b7/f6be6ffc9d07b95339ea5b3269eafcf6539db15b5f7b910323994ecff8a5/inquirer-3.3.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": 
"4a53cb9386601476e9f3241adace469ae6d1143ace2ee82f2768149e85861ab8", - "url": "https://files.pythonhosted.org/packages/4a/3a/8028f3f48e314c89d2153d05e7a2e6e79a31bdf0f5332af3af8df9f306cc/inquirer-2.9.2.tar.gz" + "hash": "2722cec4460b289aab21fc35a3b03c932780ff4e8004163955a8215e20cfd35e", + "url": "https://files.pythonhosted.org/packages/f2/33/d495a92c48203f33d2f4556a0a662dab1bd511a7458910c866f1d7a6a1a3/inquirer-3.3.0.tar.gz" } ], "project_name": "inquirer", "requires_dists": [ "blessed>=1.19.0", - "python-editor>=1.0.4", - "readchar>=2.0.1" + "editor>=1.6.0", + "readchar>=3.0.6" ], - "requires_python": ">=3.7", - "version": "2.9.2" + "requires_python": ">=3.8.1", + "version": "3.3.0" }, { "artifacts": [ @@ -2134,13 +2198,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", - "url": "https://files.pythonhosted.org/packages/30/6d/6de6be2d02603ab56e72997708809e8a5b0fbfee080735109b40a3564843/Jinja2-3.1.3-py3-none-any.whl" + "hash": "bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", + "url": "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90", - "url": "https://files.pythonhosted.org/packages/b2/5e/3a21abf3cd467d7876045335e681d276ac32492febe6d98ad89562d1a7e1/Jinja2-3.1.3.tar.gz" + "hash": "4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", + "url": "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz" } ], "project_name": "jinja2", @@ -2149,7 +2213,7 @@ "MarkupSafe>=2.0" ], "requires_python": ">=3.7", - "version": "3.1.3" + "version": "3.1.4" }, { "artifacts": [ @@ -2173,13 +2237,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "3b7bd22f058434e3b9a7ea4b1500ed47de2713872288c0d511d19926f99b459f", - "url": "https://files.pythonhosted.org/packages/75/6d/d7b55b9c1ac802ab066b3e5015e90faab1fffbbd67a2af498ffc6cc81c97/jupyter_client-8.6.1-py3-none-any.whl" + "hash": "50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f", + "url": "https://files.pythonhosted.org/packages/cf/d3/c4bb02580bc0db807edb9a29b2d0c56031be1ef0d804336deb2699a470f6/jupyter_client-8.6.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "e842515e2bab8e19186d89fdfea7abd15e39dd581f94e399f00e2af5a1652d3f", - "url": "https://files.pythonhosted.org/packages/cd/5a/f4583ba60733b5a1cb1379acdf7c710fb1fb4d9df936a740f8cadf25d853/jupyter_client-8.6.1.tar.gz" + "hash": "2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df", + "url": "https://files.pythonhosted.org/packages/ff/61/3cd51dea7878691919adc34ff6ad180f13bfe25fb8c7662a9ee6dc64e643/jupyter_client-8.6.2.tar.gz" } ], "project_name": "jupyter-client", @@ -2197,7 +2261,7 @@ "pytest-cov; extra == \"test\"", "pytest-jupyter[client]>=0.4.1; extra == \"test\"", "pytest-timeout; extra == \"test\"", - "pytest; extra == \"test\"", + "pytest<8.2.0; extra == \"test\"", "python-dateutil>=2.8.2", "pyzmq>=23.0", "sphinx-autodoc-typehints; extra == \"docs\"", @@ -2208,7 +2272,7 @@ "traitlets>=5.3" ], "requires_python": ">=3.8", - "version": "8.6.1" + "version": "8.6.2" }, { "artifacts": [ @@ -2355,13 +2419,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "5324b88089a8978bf76d1629774fcc2f1c07b82acdf00f4c5dd8ceadfffc4b40", - "url": 
"https://files.pythonhosted.org/packages/c6/c9/9cd84cbd5816aa8bee5fd5a00f857efd636ec30586848d571b67baf0b868/Mako-1.3.3-py3-none-any.whl" + "hash": "260f1dbc3a519453a9c856dedfe4beb4e50bd5a26d96386cb6c80856556bb91a", + "url": "https://files.pythonhosted.org/packages/03/62/70f5a0c2dd208f9f3f2f9afd103aec42ee4d9ad2401d78342f75e9b8da36/Mako-1.3.5-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "e16c01d9ab9c11f7290eef1cfefc093fb5a45ee4a3da09e2fec2e4d1bae54e73", - "url": "https://files.pythonhosted.org/packages/0a/dc/48e8853daf4b32748d062ce9cd47a744755fb60691ebc211ca689b849c1c/Mako-1.3.3.tar.gz" + "hash": "48dbc20568c1d276a2698b36d968fa76161bf127194907ea6fc594fa81f943bc", + "url": "https://files.pythonhosted.org/packages/67/03/fb5ba97ff65ce64f6d35b582aacffc26b693a98053fa831ab43a437cbddb/Mako-1.3.5.tar.gz" } ], "project_name": "mako", @@ -2372,7 +2436,7 @@ "pytest; extra == \"testing\"" ], "requires_python": ">=3.8", - "version": "1.3.3" + "version": "1.3.5" }, { "artifacts": [ @@ -2475,13 +2539,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633", - "url": "https://files.pythonhosted.org/packages/38/04/37055b7013dfaaf66e3a9a51e46857cc9be151476a891b995fa70da7e139/marshmallow-3.21.1-py3-none-any.whl" + "hash": "86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1", + "url": "https://files.pythonhosted.org/packages/96/d7/f318261e6ccbba86bdf626e07cd850981508fdaec52cfcdc4ac1030327ab/marshmallow-3.21.3-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3", - "url": "https://files.pythonhosted.org/packages/5b/17/1b117d1875d8287a85cc2d5e2effd3f31bd8afd9f142c7b8391b9d665f0c/marshmallow-3.21.1.tar.gz" + "hash": "4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662", + "url": "https://files.pythonhosted.org/packages/d6/31/0881962e77efa2d524ca80566ba1fb7cab000edaa9f4152b97a39b8d9a2d/marshmallow-3.21.3.tar.gz" } ], "project_name": "marshmallow", @@ -2494,25 +2558,25 @@ "pytest; extra == \"tests\"", "pytz; extra == \"tests\"", "simplejson; extra == \"tests\"", - "sphinx-issues==4.0.0; extra == \"docs\"", + "sphinx-issues==4.1.0; extra == \"docs\"", "sphinx-version-warning==1.1.2; extra == \"docs\"", - "sphinx==7.2.6; extra == \"docs\"", + "sphinx==7.3.7; extra == \"docs\"", "tox; extra == \"dev\"" ], "requires_python": ">=3.8", - "version": "3.21.1" + "version": "3.21.3" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9", - "url": "https://files.pythonhosted.org/packages/e5/3c/fe85f19699a7b40c8f9ce8ecee7e269b9b3c94099306df6f9891bdefeedd/mdit_py_plugins-0.4.0-py3-none-any.whl" + "hash": "1020dfe4e6bfc2c79fb49ae4e3f5b297f5ccd20f010187acc52af2921e27dc6a", + "url": "https://files.pythonhosted.org/packages/ef/f7/8a4dcea720a581e69ac8c5a38524baf0e3e2bb5f3819a9ff661464fe7d10/mdit_py_plugins-0.4.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b", - "url": "https://files.pythonhosted.org/packages/b4/db/61960d68d5c39ff0dd48cb799a39ae4e297f6e9b96bf2f8da29d897fba0c/mdit_py_plugins-0.4.0.tar.gz" + "hash": "834b8ac23d1cd60cec703646ffd22ae97b7955a6d596eb1d304be1e251ae499c", + "url": "https://files.pythonhosted.org/packages/00/6c/79c52651b22b64dba5c7bbabd7a294cc410bfb2353250dc8ade44d7d8ad8/mdit_py_plugins-0.4.1.tar.gz" } ], "project_name": "mdit-py-plugins", @@ -2527,7 +2591,7 @@ "sphinx-book-theme; extra == \"rtd\"" 
], "requires_python": ">=3.8", - "version": "0.4.0" + "version": "0.4.1" }, { "artifacts": [ @@ -2569,59 +2633,59 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543", - "url": "https://files.pythonhosted.org/packages/cb/46/f97bedf3ab16d38eeea0aafa3ad93cc7b9adf898218961faaea9c3c639f1/msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl" + "hash": "c8d6779aaaa5bfacee74df1fc23aee68f9d445a1e9a9cc3942ca70d4b1fa5ddb", + "url": "https://files.pythonhosted.org/packages/18/a6/904c7bd0e6d9d8c223ca7d4f542811078bf960201c9451ed983a18ac2ece/msgpack-1.1.0rc1-cp312-cp312-musllinux_1_1_x86_64.whl" }, { "algorithm": "sha256", - "hash": "ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04", - "url": "https://files.pythonhosted.org/packages/03/79/ae000bde2aee4b9f0d50c1ca1ab301ade873b59dd6968c28f918d1cf8be4/msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl" + "hash": "1567a7a089cd2dabfdf667f9a555702cfdd6bacc95538e5376e0a0d3e1cfec13", + "url": "https://files.pythonhosted.org/packages/00/04/a9dc5983cb24231eafb8cb474bc15997986ba9925d5c2a143964d243cc25/msgpack-1.1.0rc1-cp312-cp312-musllinux_1_1_aarch64.whl" }, { "algorithm": "sha256", - "hash": "8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc", - "url": "https://files.pythonhosted.org/packages/04/2a/c833a8503be9030083f0469e7a3c74d3622a3b4eae676c3934d3ccc01036/msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "3a4698fe8974242fccd90e99dd71f963b23bb414d3c1f2bef4c6df662ff7627f", + "url": "https://files.pythonhosted.org/packages/25/24/cac4e9966816f8c7c8e69e95fa86fa3d5bc2eaa406bab23ef28aca15a509/msgpack-1.1.0rc1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" }, { "algorithm": "sha256", - "hash": "99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58", - "url": "https://files.pythonhosted.org/packages/04/50/b988d0a8e8835f705e4bbcb6433845ff11dd50083c0aa43e607bb7b2ff96/msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" + "hash": "6570b5e0a006748b65f652462c872cab2d54648ff2b32e596875480558b81946", + "url": "https://files.pythonhosted.org/packages/3b/05/21b6523ea7286f238c9dc1facae615f576b676633dac23eb718c81ac33f8/msgpack-1.1.0rc1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3", - "url": "https://files.pythonhosted.org/packages/08/4c/17adf86a8fbb02c144c7569dc4919483c01a2ac270307e2d59e1ce394087/msgpack-1.0.8.tar.gz" + "hash": "f5caa3b3b0243516af8e2385746991fddd29d6b7adbfe217e21d8d2fac3101ac", + "url": "https://files.pythonhosted.org/packages/60/41/39f1219e43f6ae363fd518e41bb3b400e58afc1472bc998dda0632ebbd8a/msgpack-1.1.0rc1-cp312-cp312-musllinux_1_1_i686.whl" }, { "algorithm": "sha256", - "hash": "d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b", - "url": "https://files.pythonhosted.org/packages/11/df/558899a5f90d450e988484be25be0b49c6930858d6fe44ea6f1f66502fe5/msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl" + "hash": "620033ee62234c83ac4ab420d0bdcd0e751f145834997135a197cc865c02eb58", + "url": "https://files.pythonhosted.org/packages/90/ee/fddd0d20802b294b416b6c576f2e26e3b54a8f1628064fe9e39602b2403b/msgpack-1.1.0rc1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3", - "url": 
"https://files.pythonhosted.org/packages/54/f7/84828d0c6be6b7f0770777f1a7b1f76f3a78e8b6afb5e4e9c1c9350242be/msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "a1d3291999cc1af4b23d394b37a6dbf5a0a16da97e50c471008eb3a4ea95ea43", + "url": "https://files.pythonhosted.org/packages/c0/e5/e1126c96f726a0baaa8f814c119cb5358f740497ee617b518497b6037d23/msgpack-1.1.0rc1.tar.gz" }, { "algorithm": "sha256", - "hash": "114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee", - "url": "https://files.pythonhosted.org/packages/97/73/757eeca26527ebac31d86d35bf4ba20155ee14d35c8619dd96bc80a037f3/msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl" + "hash": "57e45e59f6d45d9bdf4a5a19ae1dd3e151ab42a8dba4bc2aa5e8c4281c9d71d5", + "url": "https://files.pythonhosted.org/packages/ea/2c/e8af682b90733a62af2436b3f27a2ff828bb1ee340e53853b4730ac6f579/msgpack-1.1.0rc1-cp312-cp312-macosx_10_9_universal2.whl" }, { "algorithm": "sha256", - "hash": "b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f", - "url": "https://files.pythonhosted.org/packages/98/e1/0d18496cbeef771db605b6a14794f9b4235d371f36b43f7223c1613969ec/msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl" + "hash": "23425589809b96ad7d5d00e691ae3ab65c1f0934a817b69b244fc236236f3477", + "url": "https://files.pythonhosted.org/packages/eb/9a/1396d6512d1dae98f7c6e95746c9a106f6861ba7966a70a9d8832f1faac0/msgpack-1.1.0rc1-cp312-cp312-macosx_10_9_x86_64.whl" }, { "algorithm": "sha256", - "hash": "d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8", - "url": "https://files.pythonhosted.org/packages/99/3e/49d430df1e9abf06bb91e9824422cd6ceead2114662417286da3ddcdd295/msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl" + "hash": "aec097b46b2e2e47229a304379d9b9bfd57845c6e8c0ef078030fcd6f21e04fd", + "url": "https://files.pythonhosted.org/packages/fc/49/d2e7d114ea72546a9d0cc432b22d458726fd6ab24cf49757943c98dc4573/msgpack-1.1.0rc1-cp312-cp312-macosx_11_0_arm64.whl" } ], "project_name": "msgpack", "requires_dists": [], "requires_python": ">=3.8", - "version": "1.0.8" + "version": "1.1.0rc1" }, { "artifacts": [ @@ -2809,19 +2873,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5", - "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl" + "hash": "5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", + "url": "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9", - "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz" + "hash": "026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "url": "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz" } ], "project_name": "packaging", "requires_dists": [], - "requires_python": ">=3.7", - "version": "24.0" + "requires_python": ">=3.8", + "version": "24.1" }, { "artifacts": [ @@ -2847,13 +2911,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1", - "url": 
"https://files.pythonhosted.org/packages/b0/15/1691fa5aaddc0c4ea4901c26f6137c29d5f6673596fe960a0340e8c308e1/platformdirs-4.2.1-py3-none-any.whl" + "hash": "2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", + "url": "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf", - "url": "https://files.pythonhosted.org/packages/b2/e4/2856bf61e54d7e3a03dd00d0c1b5fa86e6081e8f262eb91befbe64d20937/platformdirs-4.2.1.tar.gz" + "hash": "38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", + "url": "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz" } ], "project_name": "platformdirs", @@ -2870,7 +2934,7 @@ "sphinx>=7.2.6; extra == \"docs\"" ], "requires_python": ">=3.8", - "version": "4.2.1" + "version": "4.2.2" }, { "artifacts": [ @@ -2899,13 +2963,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6", - "url": "https://files.pythonhosted.org/packages/ee/fd/ca7bf3869e7caa7a037e23078539467b433a4e01eebd93f77180ab927766/prompt_toolkit-3.0.43-py3-none-any.whl" + "hash": "0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", + "url": "https://files.pythonhosted.org/packages/e8/23/22750c4b768f09386d1c3cc4337953e8936f48a888fa6dddfb669b2c9088/prompt_toolkit-3.0.47-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d", - "url": "https://files.pythonhosted.org/packages/cc/c6/25b6a3d5cd295304de1e32c9edbcf319a52e965b339629d37d42bb7126ca/prompt_toolkit-3.0.43.tar.gz" + "hash": "1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360", + "url": "https://files.pythonhosted.org/packages/47/6d/0279b119dafc74c1220420028d490c4399b790fc1256998666e3a341879f/prompt_toolkit-3.0.47.tar.gz" } ], "project_name": "prompt-toolkit", @@ -2913,7 +2977,7 @@ "wcwidth" ], "requires_python": ">=3.7.0", - "version": "3.0.43" + "version": "3.0.47" }, { "artifacts": [ @@ -3262,22 +3326,39 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c", - "url": "https://files.pythonhosted.org/packages/97/9c/372fef8377a6e340b1704768d20daaded98bf13282b5327beb2e2fe2c7ef/pygments-2.17.2-py3-none-any.whl" + "hash": "b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", + "url": "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367", - "url": "https://files.pythonhosted.org/packages/55/59/8bccf4157baf25e4aa5a0bb7fa3ba8600907de105ebc22b0c78cfbf6f565/pygments-2.17.2.tar.gz" + "hash": "786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", + "url": "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz" } ], "project_name": "pygments", "requires_dists": [ - "colorama>=0.4.6; extra == \"windows-terminal\"", - "importlib-metadata; python_version < \"3.8\" and extra == \"plugins\"" + "colorama>=0.4.6; extra == \"windows-terminal\"" ], - "requires_python": ">=3.7", - "version": "2.17.2" + "requires_python": ">=3.8", + "version": "2.18.0" + }, + { + 
"artifacts": [ + { + "algorithm": "sha256", + "hash": "060e1954d9069f428232a1adda165db0b9d8dfdce1d265d36df7fbff540acfd6", + "url": "https://files.pythonhosted.org/packages/9e/11/a1938340ecb32d71e47ad4914843775011e6e9da59ba1229f181fef3119e/pyhumps-3.8.0-py3-none-any.whl" + }, + { + "algorithm": "sha256", + "hash": "498026258f7ee1a8e447c2e28526c0bea9407f9a59c03260aee4bd6c04d681a3", + "url": "https://files.pythonhosted.org/packages/c4/83/fa6f8fb7accb21f39e8f2b6a18f76f6d90626bdb0a5e5448e5cc9b8ab014/pyhumps-3.8.0.tar.gz" + } + ], + "project_name": "pyhumps", + "requires_dists": [], + "requires_python": null, + "version": "3.8.0" }, { "artifacts": [ @@ -3316,34 +3397,34 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7", - "url": "https://files.pythonhosted.org/packages/4d/7e/c79cecfdb6aa85c6c2e3cf63afc56d0f165f24f5c66c03c695c4d9b84756/pytest-8.1.1-py3-none-any.whl" + "hash": "c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343", + "url": "https://files.pythonhosted.org/packages/4e/e7/81ebdd666d3bff6670d27349b5053605d83d55548e6bd5711f3b0ae7dd23/pytest-8.2.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044", - "url": "https://files.pythonhosted.org/packages/30/b7/7d44bbc04c531dcc753056920e0988032e5871ac674b5a84cb979de6e7af/pytest-8.1.1.tar.gz" + "hash": "de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977", + "url": "https://files.pythonhosted.org/packages/a6/58/e993ca5357553c966b9e73cb3475d9c935fe9488746e13ebdf9b80fae508/pytest-8.2.2.tar.gz" } ], "project_name": "pytest", "requires_dists": [ - "argcomplete; extra == \"testing\"", - "attrs>=19.2; extra == \"testing\"", + "argcomplete; extra == \"dev\"", + "attrs>=19.2; extra == \"dev\"", "colorama; sys_platform == \"win32\"", "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", - "hypothesis>=3.56; extra == \"testing\"", + "hypothesis>=3.56; extra == \"dev\"", "iniconfig", - "mock; extra == \"testing\"", + "mock; extra == \"dev\"", "packaging", - "pluggy<2.0,>=1.4", - "pygments>=2.7.2; extra == \"testing\"", - "requests; extra == \"testing\"", - "setuptools; extra == \"testing\"", + "pluggy<2.0,>=1.5", + "pygments>=2.7.2; extra == \"dev\"", + "requests; extra == \"dev\"", + "setuptools; extra == \"dev\"", "tomli>=1; python_version < \"3.11\"", - "xmlschema; extra == \"testing\"" + "xmlschema; extra == \"dev\"" ], "requires_python": ">=3.8", - "version": "8.1.1" + "version": "8.2.2" }, { "artifacts": [ @@ -3401,24 +3482,6 @@ "requires_python": ">=3.5", "version": "0.20.0" }, - { - "artifacts": [ - { - "algorithm": "sha256", - "hash": "1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", - "url": "https://files.pythonhosted.org/packages/c6/d3/201fc3abe391bbae6606e6f1d598c15d367033332bd54352b12f35513717/python_editor-1.0.4-py3-none-any.whl" - }, - { - "algorithm": "sha256", - "hash": "51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "url": "https://files.pythonhosted.org/packages/0a/85/78f4a216d28343a67b7397c99825cff336330893f00601443f7c7b2f2234/python-editor-1.0.4.tar.gz" - } - ], - "project_name": "python-editor", - "requires_dists": [], - "requires_python": null, - "version": "1.0.4" - }, { "artifacts": [ { @@ -3539,21 +3602,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "b4b31dd35de4897be738f27e8f9f62426b5fedb54b648364987e30ae534b71bc", - "url": 
"https://files.pythonhosted.org/packages/86/db/aca9e5e6a53a499d61cbd78b3594d700f1e48a50ab6970a92a4d1251f8db/readchar-4.0.6-py3-none-any.whl" + "hash": "d163680656b34f263fb5074023db44b999c68ff31ab394445ebfd1a2a41fe9a2", + "url": "https://files.pythonhosted.org/packages/6b/cd/feba6c20ae4b00d424e6fd802edd4e1557e500501b376c8111a60ba1b83f/readchar-4.1.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "e0dae942d3a746f8d5423f83dbad67efe704004baafe31b626477929faaee472", - "url": "https://files.pythonhosted.org/packages/ec/85/35c1a04aa52c432ec604b2816570fb0ab721cb7403191130b9c068c672c3/readchar-4.0.6.tar.gz" + "hash": "6f44d1b5f0fd93bd93236eac7da39609f15df647ab9cea39f5bc7478b3344b99", + "url": "https://files.pythonhosted.org/packages/23/85/a83385c8765af35c3fdd9cf67a387107b99bc545b8559e1f097c9d777dde/readchar-4.1.0.tar.gz" } ], "project_name": "readchar", - "requires_dists": [ - "setuptools>=41.0" - ], + "requires_dists": [], "requires_python": ">=3.8", - "version": "4.0.6" + "version": "4.1.0" }, { "artifacts": [ @@ -3585,13 +3646,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", - "url": "https://files.pythonhosted.org/packages/70/8e/0e2d847013cb52cd35b38c009bb167a1a26b2ce6cd6965bf26b47bc0bf44/requests-2.31.0-py3-none-any.whl" + "hash": "70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", + "url": "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1", - "url": "https://files.pythonhosted.org/packages/9d/be/10918a2eac4ae9f02f6cfe6414b7a155ccd8f7f9d4380d62fd5b955065c3/requests-2.31.0.tar.gz" + "hash": "55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "url": "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz" } ], "project_name": "requests", @@ -3603,8 +3664,8 @@ "idna<4,>=2.5", "urllib3<3,>=1.21.1" ], - "requires_python": ">=3.7", - "version": "2.31.0" + "requires_python": ">=3.8", + "version": "2.32.3" }, { "artifacts": [ @@ -3675,13 +3736,33 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "ceb252b11bcf87080fb7850a224fb6e05c8a776bab8f2b64b7f25b969464839d", - "url": "https://files.pythonhosted.org/packages/83/37/395cdb6ee92925fa211e55d8f07b9f93cf93f60d7d4ce5e66fd73f1ea986/s3transfer-0.10.1-py3-none-any.whl" + "hash": "0980dcbc25aba1505f307ac4f0e9e92cbd0be2a15a1e983ee86c24c87b839dfd", + "url": "https://files.pythonhosted.org/packages/86/d6/17caf2e4af1dec288477a0cbbe4a96fbc9b8a28457dce3f1f452630ce216/runs-1.2.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "5683916b4c724f799e600f41dd9e10a9ff19871bf87623cc8f491cb4f5fa0a19", - "url": "https://files.pythonhosted.org/packages/83/bc/fb0c1f76517e3380eb142af8a9d6b969c150cfca1324cea7d965d8c66571/s3transfer-0.10.1.tar.gz" + "hash": "9dc1815e2895cfb3a48317b173b9f1eac9ba5549b36a847b5cc60c3bf82ecef1", + "url": "https://files.pythonhosted.org/packages/26/6d/b9aace390f62db5d7d2c77eafce3d42774f27f1829d24fa9b6f598b3ef71/runs-1.2.2.tar.gz" + } + ], + "project_name": "runs", + "requires_dists": [ + "xmod" + ], + "requires_python": ">=3.8", + "version": "1.2.2" + }, + { + "artifacts": [ + { + "algorithm": "sha256", + "hash": "eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69", + "url": 
"https://files.pythonhosted.org/packages/3c/4a/b221409913760d26cf4498b7b1741d510c82d3ad38381984a3ddc135ec66/s3transfer-0.10.2-py3-none-any.whl" + }, + { + "algorithm": "sha256", + "hash": "0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6", + "url": "https://files.pythonhosted.org/packages/cb/67/94c6730ee4c34505b14d94040e2f31edf144c230b6b49e971b4f25ff8fab/s3transfer-0.10.2.tar.gz" } ], "project_name": "s3transfer", @@ -3690,7 +3771,7 @@ "botocore[crt]<2.0a.0,>=1.33.2; extra == \"crt\"" ], "requires_python": ">=3.8", - "version": "0.10.1" + "version": "0.10.2" }, { "artifacts": [ @@ -3761,67 +3842,60 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32", - "url": "https://files.pythonhosted.org/packages/f7/29/13965af254e3373bceae8fb9a0e6ea0d0e571171b80d6646932131d6439b/setuptools-69.5.1-py3-none-any.whl" + "hash": "fe384da74336c398e0d956d1cae0669bc02eed936cdb1d49b57de1990dc11ffc", + "url": "https://files.pythonhosted.org/packages/ef/15/88e46eb9387e905704b69849618e699dc2f54407d8953cc4ec4b8b46528d/setuptools-70.3.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987", - "url": "https://files.pythonhosted.org/packages/d6/4f/b10f707e14ef7de524fe1f8988a294fb262a29c9b5b12275c7e188864aed/setuptools-69.5.1.tar.gz" + "hash": "f171bab1dfbc86b132997f26a119f6056a57950d058587841a0082e8830f9dc5", + "url": "https://files.pythonhosted.org/packages/65/d8/10a70e86f6c28ae59f101a9de6d77bf70f147180fbf40c3af0f64080adc3/setuptools-70.3.0.tar.gz" } ], "project_name": "setuptools", "requires_dists": [ - "build[virtualenv]; extra == \"testing\"", - "build[virtualenv]>=1.0.3; extra == \"testing-integration\"", - "filelock>=3.4.0; extra == \"testing\"", - "filelock>=3.4.0; extra == \"testing-integration\"", - "furo; extra == \"docs\"", - "importlib-metadata; extra == \"testing\"", - "ini2toml[lite]>=0.9; extra == \"testing\"", - "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing-integration\"", - "jaraco.packaging>=9.3; extra == \"docs\"", - "jaraco.path>=3.2.0; extra == \"testing\"", - "jaraco.path>=3.2.0; extra == \"testing-integration\"", - "jaraco.tidelift>=1.4; extra == \"docs\"", - "mypy==1.9; extra == \"testing\"", - "packaging>=23.2; extra == \"testing\"", - "packaging>=23.2; extra == \"testing-integration\"", - "pip>=19.1; extra == \"testing\"", - "pygments-github-lexers==0.0.5; extra == \"docs\"", - "pytest!=8.1.1,>=6; extra == \"testing\"", - "pytest-checkdocs>=2.4; extra == \"testing\"", - "pytest-cov; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-enabler; extra == \"testing-integration\"", - "pytest-enabler>=2.2; extra == \"testing\"", - "pytest-home>=0.5; extra == \"testing\"", - "pytest-mypy; extra == \"testing\"", - "pytest-perf; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-ruff>=0.2.1; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-timeout; extra == \"testing\"", - "pytest-xdist; extra == \"testing-integration\"", - "pytest-xdist>=3; extra == \"testing\"", - "pytest; extra == \"testing-integration\"", - "rst.linker>=1.9; extra == \"docs\"", - "sphinx-favicon; extra == \"docs\"", - "sphinx-inline-tabs; extra == \"docs\"", - "sphinx-lint; extra == \"docs\"", - "sphinx-notfound-page<2,>=1; extra == \"docs\"", - "sphinx-reredirects; extra == 
\"docs\"", - "sphinx>=3.5; extra == \"docs\"", - "sphinxcontrib-towncrier; extra == \"docs\"", - "tomli-w>=1.0.0; extra == \"testing\"", - "tomli; extra == \"testing\"", - "tomli; extra == \"testing-integration\"", - "virtualenv>=13.0.0; extra == \"testing\"", - "virtualenv>=13.0.0; extra == \"testing-integration\"", - "wheel; extra == \"testing\"", - "wheel; extra == \"testing-integration\"" + "build[virtualenv]>=1.0.3; extra == \"test\"", + "filelock>=3.4.0; extra == \"test\"", + "furo; extra == \"doc\"", + "importlib-metadata; extra == \"test\"", + "ini2toml[lite]>=0.14; extra == \"test\"", + "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"test\"", + "jaraco.envs>=2.2; extra == \"test\"", + "jaraco.packaging>=9.3; extra == \"doc\"", + "jaraco.path>=3.2.0; extra == \"test\"", + "jaraco.test; extra == \"test\"", + "jaraco.tidelift>=1.4; extra == \"doc\"", + "mypy==1.10.0; extra == \"test\"", + "packaging>=23.2; extra == \"test\"", + "pip>=19.1; extra == \"test\"", + "pygments-github-lexers==0.0.5; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"test\"", + "pytest!=8.1.*,>=6; extra == \"test\"", + "pytest-checkdocs>=2.4; extra == \"test\"", + "pytest-cov; extra == \"test\"", + "pytest-enabler>=2.2; extra == \"test\"", + "pytest-home>=0.5; extra == \"test\"", + "pytest-mypy; extra == \"test\"", + "pytest-perf; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-ruff>=0.3.2; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-subprocess; extra == \"test\"", + "pytest-timeout; extra == \"test\"", + "pytest-xdist>=3; extra == \"test\"", + "rst.linker>=1.9; extra == \"doc\"", + "sphinx-favicon; extra == \"doc\"", + "sphinx-inline-tabs; extra == \"doc\"", + "sphinx-lint; extra == \"doc\"", + "sphinx-notfound-page<2,>=1; extra == \"doc\"", + "sphinx-reredirects; extra == \"doc\"", + "sphinx>=3.5; extra == \"doc\"", + "sphinxcontrib-towncrier; extra == \"doc\"", + "tomli-w>=1.0.0; extra == \"test\"", + "tomli; extra == \"test\"", + "virtualenv>=13.0.0; extra == \"test\"", + "wheel; extra == \"test\"" ], "requires_python": ">=3.8", - "version": "69.5.1" + "version": "70.3.0" }, { "artifacts": [ @@ -3937,23 +4011,25 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c", - "url": "https://files.pythonhosted.org/packages/f4/f1/990741d5bb2487d529d20a433210ffa136a367751e454214013b441c4575/tenacity-8.2.3-py3-none-any.whl" + "hash": "b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687", + "url": "https://files.pythonhosted.org/packages/d2/3f/8ba87d9e287b9d385a02a7114ddcef61b26f86411e121c9003eb509a1773/tenacity-8.5.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a", - "url": "https://files.pythonhosted.org/packages/89/3c/253e1627262373784bf9355db9d6f20d2d8831d79f91e9cca48050cddcc2/tenacity-8.2.3.tar.gz" + "hash": "8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78", + "url": "https://files.pythonhosted.org/packages/a3/4d/6a19536c50b849338fcbe9290d562b52cbdcf30d8963d3588a68a4107df1/tenacity-8.5.0.tar.gz" } ], "project_name": "tenacity", "requires_dists": [ + "pytest; extra == \"test\"", "reno; extra == \"doc\"", "sphinx; extra == \"doc\"", - "tornado>=4.5; extra == \"doc\"" + "tornado>=4.5; extra == \"test\"", + "typeguard; extra == \"test\"" ], - "requires_python": ">=3.7", - "version": "8.2.3" + "requires_python": ">=3.8", + 
"version": "8.5.0" }, { "artifacts": [ @@ -4019,84 +4095,84 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "5cd82d48a3dd89dee1f9d64420aa20ae65cfbd00668d6f094d7578a78efbb77b", - "url": "https://files.pythonhosted.org/packages/07/fa/c96545d741f2fd47f565e4e06bfef0962add790cb9c2289d900102b55eca/tomlkit-0.12.4-py3-none-any.whl" + "hash": "af914f5a9c59ed9d0762c7b64d3b5d5df007448eb9cd2edc8a46b1eafead172f", + "url": "https://files.pythonhosted.org/packages/73/6d/b5406752c4e4ba86692b22fab0afed8b48f16bdde8f92e1d852976b61dc6/tomlkit-0.12.5-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "7ca1cfc12232806517a8515047ba66a19369e71edf2439d0f5824f91032b6cc3", - "url": "https://files.pythonhosted.org/packages/7d/49/4c0764898ee67618996148bdba4534a422c5e698b4dbf4001f7c6f930797/tomlkit-0.12.4.tar.gz" + "hash": "eef34fba39834d4d6b73c9ba7f3e4d1c417a4e56f89a7e96e090dd0d24b8fb3c", + "url": "https://files.pythonhosted.org/packages/2b/ab/18f4c8f2bec75eb1a7aebcc52cdb02ab04fd39ff7025bb1b1c7846cc45b8/tomlkit-0.12.5.tar.gz" } ], "project_name": "tomlkit", "requires_dists": [], "requires_python": ">=3.7", - "version": "0.12.4" + "version": "0.12.5" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", - "url": "https://files.pythonhosted.org/packages/25/a3/1025f561b87b3cca6f66da149ba7ce4c2bb18d7bd6b84cd5a13a274e9dd3/tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl" + "hash": "a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698", + "url": "https://files.pythonhosted.org/packages/94/d4/f8ac1f5bd22c15fad3b527e025ce219bd526acdbd903f52053df2baecc8b/tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": "e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", - "url": "https://files.pythonhosted.org/packages/0e/76/aca8c8726d045c1c7b093cca3c5551e8df444ef74ba0dfd1f205da1f95db/tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" + "hash": "163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8", + "url": "https://files.pythonhosted.org/packages/00/d9/c33be3c1a7564f7d42d87a8d186371a75fd142097076767a5c27da941fef/tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl" }, { "algorithm": "sha256", - "hash": "27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", - "url": "https://files.pythonhosted.org/packages/34/7a/e7ec972db24513ea69ac7132c1a5eef62309dc978566a4afffa314417a45/tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl" + "hash": "e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4", + "url": "https://files.pythonhosted.org/packages/13/cf/786b8f1e6fe1c7c675e79657448178ad65e41c1c9765ef82e7f6f765c4c5/tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", - "url": "https://files.pythonhosted.org/packages/4a/2e/3ba930e3af171847d610e07ae878e04fcb7e4d7822f1a2d29a27b128ea24/tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl" + "hash": "613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3", + "url": "https://files.pythonhosted.org/packages/22/d4/54f9d12668b58336bd30defe0307e6c61589a3e687b05c366f804b7faaf0/tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", - "url": 
"https://files.pythonhosted.org/packages/62/e5/3ee2ba523a13bae5c17d1658580d13597116c1639374ca5033d58fd04724/tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14", + "url": "https://files.pythonhosted.org/packages/2e/0f/721e113a2fac2f1d7d124b3279a1da4c77622e104084f56119875019ffab/tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl" }, { "algorithm": "sha256", - "hash": "fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2", - "url": "https://files.pythonhosted.org/packages/66/e5/466aa544e0cbae9b0ece79cd42db257fa7bfa3197c853e3f7921b3963190/tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl" + "hash": "454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4", + "url": "https://files.pythonhosted.org/packages/71/63/c8fc62745e669ac9009044b889fc531b6f88ac0f5f183cac79eaa950bb23/tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl" }, { "algorithm": "sha256", - "hash": "f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", - "url": "https://files.pythonhosted.org/packages/9f/12/11d0a757bb67278d3380d41955ae98527d5ad18330b2edbdc8de222b569b/tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f", + "url": "https://files.pythonhosted.org/packages/cf/3f/2c792e7afa7dd8b24fad7a2ed3c2f24a5ec5110c7b43a64cb6095cc106b8/tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", - "url": "https://files.pythonhosted.org/packages/bd/a2/ea124343e3b8dd7712561fe56c4f92eda26865f5e1040b289203729186f2/tornado-6.4.tar.gz" + "hash": "8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842", + "url": "https://files.pythonhosted.org/packages/e4/8e/a6ce4b8d5935558828b0f30f3afcb2d980566718837b3365d98e34f6067e/tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" }, { "algorithm": "sha256", - "hash": "88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", - "url": "https://files.pythonhosted.org/packages/e2/40/bcf0af5a29a850bf5ad7f79ef51c054f99e18d9cdf4efd6eeb0df819641f/tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl" + "hash": "92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9", + "url": "https://files.pythonhosted.org/packages/ee/66/398ac7167f1c7835406888a386f6d0d26ee5dbf197d8a571300be57662d3/tornado-6.4.1.tar.gz" } ], "project_name": "tornado", "requires_dists": [], "requires_python": ">=3.8", - "version": "6.4" + "version": "6.4.1" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9", - "url": "https://files.pythonhosted.org/packages/2a/14/e75e52d521442e2fcc9f1df3c5e456aead034203d4797867980de558ab34/tqdm-4.66.2-py3-none-any.whl" + "hash": "b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644", + "url": "https://files.pythonhosted.org/packages/18/eb/fdb7eb9e48b7b02554e1664afd3bd3f117f6b6d6c5881438a0b055554f9b/tqdm-4.66.4-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531", - "url": "https://files.pythonhosted.org/packages/ea/85/3ce0f9f7d3f596e7ea57f4e5ce8c18cb44e4a9daa58ddb46ee0d13d6bff8/tqdm-4.66.2.tar.gz" + "hash": "e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb", + "url": 
"https://files.pythonhosted.org/packages/5a/c0/b7599d6e13fe0844b0cda01b9aaef9a0e87dbb10b06e4ee255d3fa1c79a2/tqdm-4.66.4.tar.gz" } ], "project_name": "tqdm", @@ -4111,7 +4187,7 @@ "slack-sdk; extra == \"slack\"" ], "requires_python": ">=3.7", - "version": "4.66.2" + "version": "4.66.4" }, { "artifacts": [ @@ -4210,19 +4286,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "adeeb4b999f19fda2dfe91c07857ff54701b6ee9b227b523a5a7be92125a2c5f", - "url": "https://files.pythonhosted.org/packages/6d/9a/99f5c07594b4ecfb370fc87e0bdcbfd48f2fa36d48a973135f77109c253b/types_aiofiles-23.2.0.20240403-py3-none-any.whl" + "hash": "7939eca4a8b4f9c6491b6e8ef160caee9a21d32e18534a57d5ed90aee47c66b4", + "url": "https://files.pythonhosted.org/packages/c3/ad/c4b3275d21c5be79487c4f6ed7cd13336997746fe099236cb29256a44a90/types_aiofiles-24.1.0.20240626-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "1ffcf8f5f72b81f71139f754ea2610ab0017f27ba4fd771e187b07840ee49c0f", - "url": "https://files.pythonhosted.org/packages/3a/0c/bac7fd8554104fa72668fb9a3bae188dc78a1ef12b84925d5e9b60489ef3/types-aiofiles-23.2.0.20240403.tar.gz" + "hash": "48604663e24bc2d5038eac05ccc33e75799b0779e93e13d6a8f711ddc306ac08", + "url": "https://files.pythonhosted.org/packages/13/e9/013940b017c313c2e15c64017268fdb0c25e0638621fb8a5d9ebe00fb0f4/types-aiofiles-24.1.0.20240626.tar.gz" } ], "project_name": "types-aiofiles", "requires_dists": [], "requires_python": ">=3.8", - "version": "23.2.0.20240403" + "version": "24.1.0.20240626" }, { "artifacts": [ @@ -4382,37 +4458,37 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "a4381e041510755a6c9210e26ad55b1629bc10237aeb9cb8b6bd24996b73db48", - "url": "https://files.pythonhosted.org/packages/c3/00/3413afcbbf152442034ffeec6ca66f48939926ee50aa9a22ee8a39e26050/types_setuptools-69.5.0.20240423-py3-none-any.whl" + "hash": "bd0db2a4b9f2c49ac5564be4e0fb3125c4c46b1f73eafdcbceffa5b005cceca4", + "url": "https://files.pythonhosted.org/packages/c3/be/60f6258da5989be4bfe1fdb1c10d4b5a722f4ca2656b20ffe1276a9d33e2/types_setuptools-70.3.0.20240710-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "a7ba908f1746c4337d13f027fa0f4a5bcad6d1d92048219ba792b3295c58586d", - "url": "https://files.pythonhosted.org/packages/12/c7/10593c47ad543413eaf25bba1979a3c5a4bf3f42e505dd28869c618a764c/types-setuptools-69.5.0.20240423.tar.gz" + "hash": "842cbf399812d2b65042c9d6ff35113bbf282dee38794779aa1f94e597bafc35", + "url": "https://files.pythonhosted.org/packages/78/89/081fb601a795995d0032d024bc71bf0a0c835a566f06c18c88a220f950e5/types-setuptools-70.3.0.20240710.tar.gz" } ], "project_name": "types-setuptools", "requires_dists": [], "requires_python": ">=3.8", - "version": "69.5.0.20240423" + "version": "70.3.0.20240710" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "1e924f823cdc2142670c942527f71b49912e6954b3b85388cd0cc1259ad4bfcf", - "url": "https://files.pythonhosted.org/packages/ab/a5/ba68e41d7070fbcdbb359ad4c57aa455d6b07348d468214f643097f47d0a/types_six-1.16.21.20240425-py3-none-any.whl" + "hash": "af2a105be6d504339bfed81319cc8e8697865f0ee5c6baa63658f127b33b9e63", + "url": "https://files.pythonhosted.org/packages/46/54/a2747d2710c82c8d138ea574435f086b6b78dce634880c843c05aa9ca523/types_six-1.16.21.20240513-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "378f8baa5b693e4f8a284e8f7b0b34c682210848900816788e19ee4712de8f59", - "url": "https://files.pythonhosted.org/packages/04/b8/34a12ef6b1f046590fc5f8ab15b5756a39f4edea1fe7b7a84ab6a7859c01/types-six-1.16.21.20240425.tar.gz" + "hash": 
"cdf445b5161bf17753500713a475ab79a45bd0d87728b8bfcecd86e2fbf66402", + "url": "https://files.pythonhosted.org/packages/1b/2a/d786db60b07bf61a16469f6781138799b3e16f87fe4b27fb9454bc01e74a/types-six-1.16.21.20240513.tar.gz" } ], "project_name": "types-six", "requires_dists": [], "requires_python": ">=3.8", - "version": "1.16.21.20240425" + "version": "1.16.21.20240513" }, { "artifacts": [ @@ -4436,19 +4512,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a", - "url": "https://files.pythonhosted.org/packages/01/f3/936e209267d6ef7510322191003885de524fc48d1b43269810cd589ceaf5/typing_extensions-4.11.0-py3-none-any.whl" + "hash": "04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", + "url": "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0", - "url": "https://files.pythonhosted.org/packages/f6/f3/b827b3ab53b4e3d8513914586dcca61c355fa2ce8252dea4da56e67bf8f2/typing_extensions-4.11.0.tar.gz" + "hash": "1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", + "url": "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz" } ], "project_name": "typing-extensions", "requires_dists": [], "requires_python": ">=3.8", - "version": "4.11.0" + "version": "4.12.2" }, { "artifacts": [ @@ -4498,13 +4574,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d", - "url": "https://files.pythonhosted.org/packages/a2/73/a68704750a7679d0b6d3ad7aa8d4da8e14e151ae82e6fee774e6e0d05ec8/urllib3-2.2.1-py3-none-any.whl" + "hash": "a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", + "url": "https://files.pythonhosted.org/packages/ca/1c/89ffc63a9605b583d5df2be791a27bc1a42b7c32bab68d3c8f2f73a98cd4/urllib3-2.2.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19", - "url": "https://files.pythonhosted.org/packages/7a/50/7fd50a27caa0652cd4caf224aa87741ea41d3265ad13f010886167cfcc79/urllib3-2.2.1.tar.gz" + "hash": "dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168", + "url": "https://files.pythonhosted.org/packages/43/6d/fa469ae21497ddc8bc93e5877702dca7cb8f911e337aca7452b5724f1bb6/urllib3-2.2.2.tar.gz" } ], "project_name": "urllib3", @@ -4516,7 +4592,7 @@ "zstandard>=0.18.0; extra == \"zstd\"" ], "requires_python": ">=3.8", - "version": "2.2.1" + "version": "2.2.2" }, { "artifacts": [ @@ -4618,6 +4694,24 @@ "requires_python": ">=3.8", "version": "1.8.0" }, + { + "artifacts": [ + { + "algorithm": "sha256", + "hash": "a24e9458a4853489042522bdca9e50ee2eac5ab75c809a91150a8a7f40670d48", + "url": "https://files.pythonhosted.org/packages/33/6b/0dc75b64a764ea1cb8e4c32d1fb273c147304d4e5483cd58be482dc62e45/xmod-1.8.1-py3-none-any.whl" + }, + { + "algorithm": "sha256", + "hash": "38c76486b9d672c546d57d8035df0beb7f4a9b088bc3fb2de5431ae821444377", + "url": "https://files.pythonhosted.org/packages/72/b2/e3edc608823348e628a919e1d7129e641997afadd946febdd704aecc5881/xmod-1.8.1.tar.gz" + } + ], + "project_name": "xmod", + "requires_dists": [], + "requires_python": ">=3.8", + "version": "1.8.1" + }, { "artifacts": [ { @@ -4729,9 +4823,10 @@ ], "only_builds": [], "only_wheels": [], + "overridden": [], 
"path_mappings": {}, - "pex_version": "2.3.0", - "pip_version": "24.0", + "pex_version": "2.10.0", + "pip_version": "24.1.2", "prefer_older_binary": false, "requirements": [ "Jinja2~=3.1.2", @@ -4740,9 +4835,10 @@ "SQLAlchemy[postgresql_asyncpg]~=1.4.40", "aiodataloader-ng~=0.2.1", "aiodns>=3.0", - "aiodocker~=0.21.0", + "aiodocker==0.22.1", "aiofiles~=23.2.1", "aiohttp_cors~=0.7", + "aiohttp_jinja2~=1.6", "aiohttp_sse>=2.0", "aiohttp~=3.9.1", "aiomonitor~=0.7.0", @@ -4777,7 +4873,7 @@ "hiredis>=2.2.3", "humanize>=3.1.0", "ifaddr~=0.2", - "inquirer~=2.9.2", + "inquirer~=3.3.0", "janus~=1.0.0", "jupyter-client>=6.0", "kubernetes-asyncio~=9.1.0", @@ -4793,6 +4889,7 @@ "psutil~=5.9.1", "pycryptodome>=3.14.1", "pydantic~=2.6.4", + "pyhumps~=3.8.0", "pytest-dependency>=0.5.1", "pytest>=7.3.1", "python-dateutil>=2.8", @@ -4802,6 +4899,7 @@ "redis[hiredis]==4.5.5", "rich~=13.6", "setproctitle~=1.3.2", + "setuptools~=70.3.0", "tabulate~=0.8.9", "temporenc~=0.1.0", "tenacity>=8.0", @@ -4827,7 +4925,7 @@ "zipstream-new~=1.1.8" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/requirements.txt b/requirements.txt index 64a0eb2fa31..3b4bc572a92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,9 @@ aiodataloader-ng~=0.2.1 -aiodocker~=0.21.0 +aiodocker==0.22.1 aiofiles~=23.2.1 aiohttp~=3.9.1 aiohttp_cors~=0.7 +aiohttp_jinja2~=1.6 aiohttp_sse>=2.0 aiodns>=3.0 aiomonitor~=0.7.0 @@ -33,7 +34,7 @@ graphene~=3.3.0 graypy==2.1.0 humanize>=3.1.0 ifaddr~=0.2 -inquirer~=2.9.2 +inquirer~=3.3.0 janus~=1.0.0 Jinja2~=3.1.2 jupyter-client>=6.0 @@ -48,6 +49,7 @@ networkx~=3.3.0 pexpect~=4.8 psutil~=5.9.1 pycryptodome>=3.14.1 +pyhumps~=3.8.0 python-dateutil>=2.8 python-dotenv~=0.20.0 python-json-logger>=2.0.1 @@ -61,6 +63,7 @@ redis[hiredis]==4.5.5 rich~=13.6 SQLAlchemy[postgresql_asyncpg]~=1.4.40 setproctitle~=1.3.2 +setuptools~=70.3.0 tabulate~=0.8.9 temporenc~=0.1.0 tenacity>=8.0 diff --git a/scripts/bootstrap-static-python.sh b/scripts/bootstrap-static-python.sh index 5d2fc4d8830..5d318a0b46d 100755 --- a/scripts/bootstrap-static-python.sh +++ b/scripts/bootstrap-static-python.sh @@ -10,7 +10,7 @@ has_python() { } install_static_python() { - local build_date="20240224" + local build_date="20240713" local build_version="${STANDALONE_PYTHON_VERSION}" local build_tag="cpython-${build_version}+${build_date}-${STANDALONE_PYTHON_ARCH}-${STANDALONE_PYTHON_PLATFORM}" dist_url="https://github.com/indygreg/python-build-standalone/releases/download/${build_date}/${build_tag}-install_only.tar.gz" @@ -30,7 +30,7 @@ install_static_python() { cd "${cwd}" } -STANDALONE_PYTHON_VERSION="3.12.2" +STANDALONE_PYTHON_VERSION="3.12.4" STANDALONE_PYTHON_ARCH=$(arch) if [[ "$OSTYPE" == "linux-gnu"* ]]; then STANDALONE_PYTHON_PLATFORM="unknown-linux-gnu" diff --git a/scripts/build-scies.sh b/scripts/build-scies.sh index 4f1151549d4..9937a8965cd 100755 --- a/scripts/build-scies.sh +++ b/scripts/build-scies.sh @@ -28,9 +28,4 @@ case "$SYSTEM" in CHECKSUM_CMD="shasum -a 256" ;; esac -mkdir -p dist -# Normalize the package naming -unzip "src/ai/backend/web/assets/backend.ai-local-proxy-$SRC_PLATFORM.zip" -mv "backend.ai-local-proxy" "dist/backendai-local-proxy-$DST_PLATFORM" -cd dist -ls backendai-local-proxy-* | grep -v '.sha256' | xargs -I{} sh -c "$CHECKSUM_CMD {} > {}.sha256" + diff --git a/scripts/clear-test-containers.sh b/scripts/clear-test-containers.sh new file mode 100755 index 00000000000..2ad6a765f51 --- /dev/null +++ 
b/scripts/clear-test-containers.sh @@ -0,0 +1,5 @@ +#! /bin/bash + +docker ps -a -q --filter 'name=^test-' | xargs -r docker rm -f -v +docker network ls --filter 'name=^testnet-' --format '{{.ID}}' | xargs -r docker network rm +rm -rf ~/.cache/bai/testing/* diff --git a/scripts/delete-dev.sh b/scripts/delete-dev.sh index 877e4360940..600e977540c 100755 --- a/scripts/delete-dev.sh +++ b/scripts/delete-dev.sh @@ -90,7 +90,7 @@ else fi show_info "Checking the bootstrapper Python version..." -STANDALONE_PYTHON_VERSION="3.12.2" +STANDALONE_PYTHON_VERSION="3.12.4" STANDALONE_PYTHON_PATH="$HOME/.cache/bai/bootstrap/cpython/${STANDALONE_PYTHON_VERSION}" bpython="${STANDALONE_PYTHON_PATH}/bin/python3" if [ $(has_python "$bpython") -ne 0 ]; then diff --git a/scripts/install-dev.sh b/scripts/install-dev.sh index 55d6b9ff164..6c3067bc949 100755 --- a/scripts/install-dev.sh +++ b/scripts/install-dev.sh @@ -175,6 +175,8 @@ show_guide() { echo " > ${WHITE}./py -m ai.backend.storage.server${NC}" show_note "How to run Backend.AI web server (for ID/Password login and Web UI):" echo " > ${WHITE}./py -m ai.backend.web.server${NC}" + show_note "How to run Backend.AI wsproxy:" + echo " > ${WHITE}./py -m ai.backend.wsproxy.server${NC}" echo " ${LRED}DO NOT source env-local-*.sh in the shell where you run the web server" echo " to prevent misbehavior of the client used inside the web server.${NC}" show_info "How to run your first code:" @@ -193,8 +195,6 @@ show_guide() { echo " > ${WHITE}cd src/ai/backend/webui; npm run build:d${NC}" echo "(Terminal 2)" echo " > ${WHITE}cd src/ai/backend/webui; npm run server:d${NC}" - echo "(Terminal 3)" - echo " > ${WHITE}cd src/ai/backend/webui; npm run wsproxy${NC}" echo "If you just run ${WHITE}./py -m ai.backend.web.server${NC}, it will use the local version compiled from the checked out source." fi show_info "Manual configuration for the client accessible hostname in various proxies" @@ -620,9 +620,8 @@ install_editable_webui() { echo "PROXYBASEHOST=localhost" >> .env echo "PROXYBASEPORT=${WSPROXY_PORT}" >> .env fi - npm i + pnpm i make compile - make compile_wsproxy cd ../../../.. } @@ -729,9 +728,6 @@ setup_environment() { show_info "Ensuring checkout of LFS files..." git lfs pull - show_info "Ensuring checkout of submodules..." - git submodule update --init --checkout --recursive - show_info "Configuring the standard git hooks..." 
install_git_hooks @@ -923,6 +919,9 @@ configure_backendai() { cp configs/webserver/halfstack.conf ./webserver.conf sed_inplace "s/https:\/\/api.backend.ai/http:\/\/127.0.0.1:${MANAGER_PORT}/" ./webserver.conf + # configure wsproxy + cp configs/wsproxy/halfstack.toml ./wsproxy.toml + if [ $CONFIGURE_HA -eq 1 ]; then sed_inplace "s/redis.addr = \"localhost:6379\"/# redis.addr = \"localhost:6379\"/" ./webserver.conf sed_inplace "s/# redis.password = \"mysecret\"/redis.password = \"develove\"/" ./webserver.conf @@ -1014,8 +1013,8 @@ configure_backendai() { echo "export BACKEND_ENDPOINT_TYPE=session" >> "${CLIENT_ADMIN_CONF_FOR_SESSION}" echo "echo 'Run backend.ai login to make an active session.'" >> "${CLIENT_ADMIN_CONF_FOR_SESSION}" - echo "echo 'Username: $(cat fixtures/manager/example-keypairs.json | jq -r '.users[] | select(.username=="admin") | .email')'" >> "${CLIENT_ADMIN_CONF_FOR_SESSION}" - echo "echo 'Password: $(cat fixtures/manager/example-keypairs.json | jq -r '.users[] | select(.username=="admin") | .password')'" >> "${CLIENT_ADMIN_CONF_FOR_SESSION}" + echo "echo 'Username: $(cat fixtures/manager/example-users.json | jq -r '.users[] | select(.username=="admin") | .email')'" >> "${CLIENT_ADMIN_CONF_FOR_SESSION}" + echo "echo 'Password: $(cat fixtures/manager/example-users.json | jq -r '.users[] | select(.username=="admin") | .password')'" >> "${CLIENT_ADMIN_CONF_FOR_SESSION}" chmod +x "${CLIENT_ADMIN_CONF_FOR_SESSION}" CLIENT_DOMAINADMIN_CONF_FOR_API="env-local-domainadmin-api.sh" CLIENT_DOMAINADMIN_CONF_FOR_SESSION="env-local-domainadmin-session.sh" @@ -1041,8 +1040,8 @@ configure_backendai() { echo "export BACKEND_ENDPOINT_TYPE=session" >> "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" echo "echo 'Run backend.ai login to make an active session.'" >> "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" - echo "echo 'Username: $(cat fixtures/manager/example-keypairs.json | jq -r '.users[] | select(.username=="domain-admin") | .email')'" >> "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" - echo "echo 'Password: $(cat fixtures/manager/example-keypairs.json | jq -r '.users[] | select(.username=="domain-admin") | .password')'" >> "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" + echo "echo 'Username: $(cat fixtures/manager/example-users.json | jq -r '.users[] | select(.username=="domain-admin") | .email')'" >> "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" + echo "echo 'Password: $(cat fixtures/manager/example-users.json | jq -r '.users[] | select(.username=="domain-admin") | .password')'" >> "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" chmod +x "${CLIENT_DOMAINADMIN_CONF_FOR_SESSION}" CLIENT_USER_CONF_FOR_API="env-local-user-api.sh" CLIENT_USER_CONF_FOR_SESSION="env-local-user-session.sh" @@ -1076,8 +1075,8 @@ configure_backendai() { echo "export BACKEND_ENDPOINT_TYPE=session" >> "${CLIENT_USER_CONF_FOR_SESSION}" echo "echo 'Run backend.ai login to make an active session.'" >> "${CLIENT_USER_CONF_FOR_SESSION}" - echo "echo 'Username: $(cat fixtures/manager/example-keypairs.json | jq -r '.users[] | select(.username=="user") | .email')'" >> "${CLIENT_USER_CONF_FOR_SESSION}" - echo "echo 'Password: $(cat fixtures/manager/example-keypairs.json | jq -r '.users[] | select(.username=="user") | .password')'" >> "${CLIENT_USER_CONF_FOR_SESSION}" + echo "echo 'Username: $(cat fixtures/manager/example-users.json | jq -r '.users[] | select(.username=="user") | .email')'" >> "${CLIENT_USER_CONF_FOR_SESSION}" + echo "echo 'Password: $(cat fixtures/manager/example-users.json | jq -r '.users[] | select(.username=="user") | .password')'" >> 
"${CLIENT_USER_CONF_FOR_SESSION}" chmod +x "${CLIENT_USER_CONF_FOR_SESSION}" show_info "Dumping the installed etcd configuration to ./dev.etcd.installed.json as a backup." diff --git a/src/ai/backend/agent/agent.py b/src/ai/backend/agent/agent.py index 73c5440a5f0..20fbc7f3b24 100644 --- a/src/ai/backend/agent/agent.py +++ b/src/ai/backend/agent/agent.py @@ -153,7 +153,14 @@ known_slot_types, ) from .stats import StatContext, StatModes -from .types import Container, ContainerLifecycleEvent, ContainerStatus, LifecycleEvent, MountInfo +from .types import ( + Container, + ContainerLifecycleEvent, + ContainerStatus, + KernelLifecycleStatus, + LifecycleEvent, + MountInfo, +) from .utils import generate_local_instance_id, get_arch_name if TYPE_CHECKING: @@ -554,7 +561,6 @@ class AbstractAgent( redis: Redis restarting_kernels: MutableMapping[KernelId, RestartTracker] - terminating_kernels: Set[KernelId] timer_tasks: MutableSequence[asyncio.Task] container_lifecycle_queue: asyncio.Queue[ContainerLifecycleEvent | Sentinel] @@ -594,7 +600,6 @@ def __init__( self.computers = {} self.images = {} # repoTag -> digest self.restarting_kernels = {} - self.terminating_kernels = set() self.stat_ctx = StatContext( self, mode=StatModes(local_config["container"]["stats-type"]), @@ -698,7 +703,13 @@ async def _pipeline(r: Redis): self.timer_tasks.append(aiotools.create_timer(self.heartbeat, heartbeat_interval)) # Prepare auto-cleaning of idle kernels. - self.timer_tasks.append(aiotools.create_timer(self.sync_container_lifecycles, 10.0)) + sync_container_lifecycles_config = self.local_config["agent"]["sync-container-lifecycles"] + if sync_container_lifecycles_config["enabled"]: + self.timer_tasks.append( + aiotools.create_timer( + self.sync_container_lifecycles, sync_container_lifecycles_config["interval"] + ) + ) if abuse_report_path := self.local_config["agent"].get("abuse-report-path"): log.info( @@ -737,6 +748,7 @@ async def shutdown(self, stop_signal: signal.Signals) -> None: if kernel_obj.runner is not None: await kernel_obj.runner.close() await kernel_obj.close() + await self.save_last_registry(force=True) if stop_signal == signal.SIGTERM: await self.clean_all_kernels(blocking=True) @@ -956,7 +968,10 @@ async def collect_container_stat(self, interval: float): container_ids = [] async with self.registry_lock: for kernel_id, kernel_obj in [*self.kernel_registry.items()]: - if not kernel_obj.stats_enabled: + if ( + not kernel_obj.stats_enabled + or kernel_obj.state != KernelLifecycleStatus.RUNNING + ): continue container_ids.append(kernel_obj["container_id"]) await self.stat_ctx.collect_container_stat(container_ids) @@ -974,7 +989,10 @@ async def collect_process_stat(self, interval: float): container_ids = [] async with self.registry_lock: for kernel_id, kernel_obj in [*self.kernel_registry.items()]: - if not kernel_obj.stats_enabled: + if ( + not kernel_obj.stats_enabled + or kernel_obj.state != KernelLifecycleStatus.RUNNING + ): continue updated_kernel_ids.append(kernel_id) container_ids.append(kernel_obj["container_id"]) @@ -999,6 +1017,7 @@ async def _handle_start_event(self, ev: ContainerLifecycleEvent) -> None: kernel_obj = self.kernel_registry.get(ev.kernel_id) if kernel_obj is not None: kernel_obj.stats_enabled = True + kernel_obj.state = KernelLifecycleStatus.RUNNING async def _handle_destroy_event(self, ev: ContainerLifecycleEvent) -> None: try: @@ -1006,12 +1025,13 @@ async def _handle_destroy_event(self, ev: ContainerLifecycleEvent) -> None: assert current_task is not None if ev.kernel_id not in 
self._ongoing_destruction_tasks: self._ongoing_destruction_tasks[ev.kernel_id] = current_task - self.terminating_kernels.add(ev.kernel_id) async with self.registry_lock: kernel_obj = self.kernel_registry.get(ev.kernel_id) if kernel_obj is None: log.warning( - "destroy_kernel(k:{0}) kernel missing (already dead?)", ev.kernel_id + "destroy_kernel(k:{0}, c:{1}) kernel missing (already dead?)", + ev.kernel_id, + ev.container_id, ) if ev.container_id is None: await self.reconstruct_resource_usage() @@ -1027,6 +1047,7 @@ async def _handle_destroy_event(self, ev: ContainerLifecycleEvent) -> None: ev.done_future.set_result(None) return else: + kernel_obj.state = KernelLifecycleStatus.TERMINATING kernel_obj.stats_enabled = False kernel_obj.termination_reason = ev.reason if kernel_obj.runner is not None: @@ -1039,18 +1060,17 @@ async def _handle_destroy_event(self, ev: ContainerLifecycleEvent) -> None: ev.done_future.set_exception(e) raise finally: - if ev.container_id is not None: - await self.container_lifecycle_queue.put( - ContainerLifecycleEvent( - ev.kernel_id, - ev.session_id, - ev.container_id, - LifecycleEvent.CLEAN, - ev.reason, - suppress_events=ev.suppress_events, - done_future=ev.done_future, - ), - ) + await self.container_lifecycle_queue.put( + ContainerLifecycleEvent( + ev.kernel_id, + ev.session_id, + ev.container_id, + LifecycleEvent.CLEAN, + ev.reason, + suppress_events=ev.suppress_events, + done_future=ev.done_future, + ), + ) except asyncio.CancelledError: pass except Exception: @@ -1101,7 +1121,6 @@ async def _handle_clean_event(self, ev: ContainerLifecycleEvent) -> None: self.port_pool.update(restored_ports) await kernel_obj.close() finally: - self.terminating_kernels.discard(ev.kernel_id) if restart_tracker := self.restarting_kernels.get(ev.kernel_id, None): restart_tracker.destroy_event.set() else: @@ -1262,75 +1281,124 @@ async def sync_container_lifecycles(self, interval: float) -> None: for cases when we miss the container lifecycle events from the underlying implementation APIs due to the agent restarts or crashes. 
""" - known_kernels: Dict[KernelId, ContainerId] = {} + known_kernels: Dict[KernelId, ContainerId | None] = {} alive_kernels: Dict[KernelId, ContainerId] = {} kernel_session_map: Dict[KernelId, SessionId] = {} own_kernels: dict[KernelId, ContainerId] = {} - terminated_kernels = {} + terminated_kernels: dict[KernelId, ContainerLifecycleEvent] = {} - async with self.registry_lock: + def _get_session_id(container: Container) -> SessionId | None: + _session_id = container.labels.get("ai.backend.session-id") try: - # Check if: there are dead containers - for kernel_id, container in await self.enumerate_containers(DEAD_STATUS_SET): - if ( - kernel_id in self.restarting_kernels - or kernel_id in self.terminating_kernels - ): - continue - log.info( - "detected dead container during lifeycle sync (k:{}, c:{})", - kernel_id, - container.id, - ) - session_id = SessionId(UUID(container.labels["ai.backend.session-id"])) - terminated_kernels[kernel_id] = ContainerLifecycleEvent( - kernel_id, - session_id, - known_kernels[kernel_id], - LifecycleEvent.CLEAN, - KernelLifecycleEventReason.SELF_TERMINATED, - ) - for kernel_id, container in await self.enumerate_containers(ACTIVE_STATUS_SET): - alive_kernels[kernel_id] = container.id - session_id = SessionId(UUID(container.labels["ai.backend.session-id"])) - kernel_session_map[kernel_id] = session_id - own_kernels[kernel_id] = container.id - for kernel_id, kernel_obj in self.kernel_registry.items(): - known_kernels[kernel_id] = kernel_obj["container_id"] - session_id = kernel_obj.session_id - kernel_session_map[kernel_id] = session_id - # Check if: kernel_registry has the container but it's gone. - for kernel_id in known_kernels.keys() - alive_kernels.keys(): - if ( - kernel_id in self.restarting_kernels - or kernel_id in self.terminating_kernels - ): - continue - terminated_kernels[kernel_id] = ContainerLifecycleEvent( - kernel_id, - kernel_session_map[kernel_id], - known_kernels[kernel_id], - LifecycleEvent.CLEAN, - KernelLifecycleEventReason.SELF_TERMINATED, + return SessionId(UUID(_session_id)) + except ValueError: + log.warning( + f"sync_container_lifecycles() invalid session-id (cid: {container.id}, sid:{_session_id})" + ) + return None + + log.debug("sync_container_lifecycles(): triggered") + try: + _containers = await self.enumerate_containers(ACTIVE_STATUS_SET | DEAD_STATUS_SET) + async with self.registry_lock: + try: + # Check if: there are dead containers + dead_containers = [ + (kid, container) + for kid, container in _containers + if container.status in DEAD_STATUS_SET + ] + log.debug( + f"detected dead containers: {[container.id[:12] for _, container in dead_containers]}" ) - # Check if: there are containers not spawned by me. 
- for kernel_id in alive_kernels.keys() - known_kernels.keys(): - if kernel_id in self.restarting_kernels: - continue - terminated_kernels[kernel_id] = ContainerLifecycleEvent( - kernel_id, - kernel_session_map[kernel_id], - alive_kernels[kernel_id], - LifecycleEvent.DESTROY, - KernelLifecycleEventReason.TERMINATED_UNKNOWN_CONTAINER, + for kernel_id, container in dead_containers: + if kernel_id in self.restarting_kernels: + continue + log.info( + "detected dead container during lifeycle sync (k:{}, c:{})", + kernel_id, + container.id, + ) + session_id = _get_session_id(container) + if session_id is None: + continue + terminated_kernels[kernel_id] = ContainerLifecycleEvent( + kernel_id, + session_id, + container.id, + LifecycleEvent.CLEAN, + KernelLifecycleEventReason.SELF_TERMINATED, + ) + active_containers = [ + (kid, container) + for kid, container in _containers + if container.status in ACTIVE_STATUS_SET + ] + log.debug( + f"detected active containers: {[container.id[:12] for _, container in active_containers]}" ) - finally: - # Enqueue the events. - for kernel_id, ev in terminated_kernels.items(): - await self.container_lifecycle_queue.put(ev) - - # Set container count - await self.set_container_count(len(own_kernels.keys())) + for kernel_id, container in active_containers: + alive_kernels[kernel_id] = container.id + session_id = _get_session_id(container) + if session_id is None: + continue + kernel_session_map[kernel_id] = session_id + own_kernels[kernel_id] = container.id + for kernel_id, kernel_obj in self.kernel_registry.items(): + known_kernels[kernel_id] = ( + ContainerId(kernel_obj.container_id) + if kernel_obj.container_id is not None + else None + ) + session_id = kernel_obj.session_id + kernel_session_map[kernel_id] = session_id + # Check if: kernel_registry has the container but it's gone. + for kernel_id in known_kernels.keys() - alive_kernels.keys(): + kernel_obj = self.kernel_registry[kernel_id] + if ( + kernel_id in self.restarting_kernels + or kernel_obj.state != KernelLifecycleStatus.RUNNING + ): + continue + log.debug(f"kernel with no container (kid: {kernel_id})") + terminated_kernels[kernel_id] = ContainerLifecycleEvent( + kernel_id, + kernel_session_map[kernel_id], + known_kernels[kernel_id], + LifecycleEvent.CLEAN, + KernelLifecycleEventReason.CONTAINER_NOT_FOUND, + ) + # Check if: there are containers already deleted from my registry. + for kernel_id in alive_kernels.keys() - known_kernels.keys(): + if kernel_id in self.restarting_kernels: + continue + log.debug(f"kernel not found in registry (kid:{kernel_id})") + terminated_kernels[kernel_id] = ContainerLifecycleEvent( + kernel_id, + kernel_session_map[kernel_id], + alive_kernels[kernel_id], + LifecycleEvent.DESTROY, + KernelLifecycleEventReason.TERMINATED_UNKNOWN_CONTAINER, + ) + finally: + # Enqueue the events. 
+ terminated_kernel_ids = ",".join([ + str(kid) for kid in terminated_kernels.keys() + ]) + if terminated_kernel_ids: + log.debug(f"Terminate kernels(ids:[{terminated_kernel_ids}])") + for kernel_id, ev in terminated_kernels.items(): + await self.container_lifecycle_queue.put(ev) + + # Set container count + await self.set_container_count(len(own_kernels.keys())) + except asyncio.CancelledError: + pass + except asyncio.TimeoutError: + log.warning("sync_container_lifecycles() timeout, continuing") + except Exception as e: + log.exception(f"sync_container_lifecycles() failure, continuing (detail: {repr(e)})") + await self.produce_error_event() async def set_container_count(self, container_count: int) -> None: await redis_helper.execute( @@ -1679,6 +1747,18 @@ async def execute_batch( SessionFailureEvent(session_id, KernelLifecycleEventReason.TASK_CANCELLED, -2), ) + async def create_batch_execution_task( + self, + session_id: SessionId, + kernel_id: KernelId, + code_to_execute: str, + ) -> None: + self._ongoing_exec_batch_tasks.add( + asyncio.create_task( + self.execute_batch(session_id, kernel_id, code_to_execute), + ), + ) + async def create_kernel( self, session_id: SessionId, @@ -1946,7 +2026,7 @@ async def create_kernel( service_ports, ) async with self.registry_lock: - self.kernel_registry[ctx.kernel_id] = kernel_obj + self.kernel_registry[kernel_id] = kernel_obj try: container_data = await ctx.start_container( kernel_obj, @@ -1958,7 +2038,7 @@ async def create_kernel( msg = e.message or "unknown" log.error( "Kernel failed to create container. Kernel is going to be destroyed." - f" (k:{ctx.kernel_id}, detail:{msg})", + f" (k:{kernel_id}, detail:{msg})", ) cid = e.container_id async with self.registry_lock: @@ -1973,17 +2053,22 @@ async def create_kernel( raise AgentError( f"Kernel failed to create container (k:{str(ctx.kernel_id)}, detail:{msg})" ) - except Exception: + except Exception as e: log.warning( - "Kernel failed to create container (k:{}). Kernel is going to be" - " unregistered.", + "Kernel failed to create container (k:{}). Kernel is going to be destroyed.", kernel_id, ) - async with self.registry_lock: - del self.kernel_registry[kernel_id] - raise + await self.inject_container_lifecycle_event( + kernel_id, + session_id, + LifecycleEvent.DESTROY, + KernelLifecycleEventReason.FAILED_TO_CREATE, + ) + raise AgentError( + f"Kernel failed to create container (k:{str(kernel_id)}, detail: {str(e)})" + ) async with self.registry_lock: - self.kernel_registry[ctx.kernel_id].data.update(container_data) + self.kernel_registry[kernel_id].data.update(container_data) await kernel_obj.init(self.event_producer) current_task = asyncio.current_task() @@ -2063,18 +2148,8 @@ async def create_kernel( }, ), ) - - if ( - kernel_config["session_type"] == "batch" - and kernel_config["cluster_role"] == "main" - ): - self._ongoing_exec_batch_tasks.add( - asyncio.create_task( - self.execute_batch( - session_id, kernel_id, kernel_config["startup_command"] or "" - ), - ), - ) + async with self.registry_lock: + kernel_obj.state = KernelLifecycleStatus.RUNNING # The startup command for the batch-type sessions will be executed by the manager # upon firing of the "session_started" event. 
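The agent changes above attach an explicit lifecycle state to each kernel object and make the periodic stat collectors skip kernels that are not (yet, or no longer) running. Below is a minimal, self-contained sketch of that filtering rule; `FakeKernel` and the registry dict are hypothetical stand-ins for illustration, only the enum member names follow the diff.

```python
import enum
from dataclasses import dataclass


class KernelLifecycleStatus(enum.StrEnum):
    PREPARING = enum.auto()
    RUNNING = enum.auto()
    TERMINATING = enum.auto()


@dataclass
class FakeKernel:
    container_id: str
    stats_enabled: bool
    state: KernelLifecycleStatus = KernelLifecycleStatus.PREPARING


def collectable_container_ids(registry: dict[str, FakeKernel]) -> list[str]:
    # Only kernels that are both stats-enabled and RUNNING get sampled;
    # PREPARING and TERMINATING kernels are skipped to avoid racing with
    # container creation and cleanup.
    return [
        kernel.container_id
        for kernel in registry.values()
        if kernel.stats_enabled and kernel.state == KernelLifecycleStatus.RUNNING
    ]


registry = {
    "k1": FakeKernel("c1", True, KernelLifecycleStatus.RUNNING),
    "k2": FakeKernel("c2", True, KernelLifecycleStatus.TERMINATING),
    "k3": FakeKernel("c3", False, KernelLifecycleStatus.RUNNING),
}
assert collectable_container_ids(registry) == ["c1"]
```

The same per-kernel state is what `sync_container_lifecycles()` now consults in place of the removed `terminating_kernels` set when deciding whether a registry entry without a live container should be cleaned up.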
diff --git a/src/ai/backend/agent/config.py b/src/ai/backend/agent/config.py index 6f2526b1436..f7a7cdcf752 100644 --- a/src/ai/backend/agent/config.py +++ b/src/ai/backend/agent/config.py @@ -16,6 +16,11 @@ "size-limit": "64M", } +default_sync_container_lifecycles_config = { + "enabled": True, + "interval": 10.0, +} + agent_local_config_iv = ( t.Dict({ t.Key("agent"): t.Dict({ @@ -59,6 +64,16 @@ t.Key("force-terminate-abusing-containers", default=False): t.ToBool, t.Key("kernel-creation-concurrency", default=4): t.ToInt[1:32], t.Key("use-experimental-redis-event-dispatcher", default=False): t.ToBool, + t.Key( + "sync-container-lifecycles", default=default_sync_container_lifecycles_config + ): t.Dict({ + t.Key( + "enabled", default=default_sync_container_lifecycles_config["enabled"] + ): t.ToBool, + t.Key( + "interval", default=default_sync_container_lifecycles_config["interval"] + ): t.ToFloat[0:], + }).allow_extra("*"), }).allow_extra("*"), t.Key("container"): t.Dict({ t.Key("kernel-uid", default=-1): tx.UserID, diff --git a/src/ai/backend/agent/docker/agent.py b/src/ai/backend/agent/docker/agent.py index aa979e60731..8e7bbee81d5 100644 --- a/src/ai/backend/agent/docker/agent.py +++ b/src/ai/backend/agent/docker/agent.py @@ -10,6 +10,7 @@ import signal import struct import sys +from collections.abc import Mapping from decimal import Decimal from functools import partial from io import StringIO @@ -22,13 +23,13 @@ FrozenSet, List, Literal, - Mapping, MutableMapping, Optional, Sequence, Set, Tuple, Union, + cast, ) from uuid import UUID @@ -38,6 +39,7 @@ import zmq from aiodocker.docker import Docker, DockerContainer from aiodocker.exceptions import DockerError +from aiodocker.types import PortInfo from aiomonitor.task import preserve_termination_log from async_timeout import timeout @@ -120,6 +122,30 @@ def container_from_docker_container(src: DockerContainer) -> Container: ) +async def _clean_scratch( + loop: asyncio.AbstractEventLoop, + scratch_type: str, + scratch_root: Path, + kernel_id: KernelId, +) -> None: + scratch_dir = scratch_root / str(kernel_id) + tmp_dir = scratch_root / f"{kernel_id}_tmp" + try: + if sys.platform.startswith("linux") and scratch_type == "memory": + await destroy_scratch_filesystem(scratch_dir) + await destroy_scratch_filesystem(tmp_dir) + await loop.run_in_executor(None, shutil.rmtree, scratch_dir) + await loop.run_in_executor(None, shutil.rmtree, tmp_dir) + elif sys.platform.startswith("linux") and scratch_type == "hostfile": + await destroy_loop_filesystem(scratch_root, kernel_id) + else: + await loop.run_in_executor(None, shutil.rmtree, scratch_dir) + except CalledProcessError: + pass + except FileNotFoundError: + pass + + def _DockerError_reduce(self): return ( type(self), @@ -851,6 +877,18 @@ async def start_container( if self.local_config["debug"]["log-kernel-config"]: log.debug("full container config: {!r}", pretty(container_config)) + async def _rollback_container_creation() -> None: + await _clean_scratch( + loop, + self.local_config["container"]["scratch-type"], + self.local_config["container"]["scratch-root"], + self.kernel_id, + ) + self.port_pool.update(host_ports) + async with self.resource_lock: + for dev_name, device_alloc in resource_spec.allocations.items(): + self.computers[dev_name].alloc_map.free(device_alloc) + # We are all set! Create and start the container. 
async with closing_async(Docker()) as docker: container: Optional[DockerContainer] = None @@ -859,7 +897,7 @@ async def start_container( config=container_config, name=kernel_name ) assert container is not None - cid = container._id + cid = cast(str, container._id) resource_spec.container_id = cid # Write resource.txt again to update the container id. with open(self.config_dir / "resource.txt", "w") as f: @@ -874,51 +912,48 @@ async def start_container( for k, v in kvpairs.items(): await writer.write(f"{k}={v}\n") - await container.start() - - if self.internal_data.get("sudo_session_enabled", False): - exec = await container.exec( - [ - # file ownership is guaranteed to be set as root:root since command is executed on behalf of root user - "sh", - "-c", - 'mkdir -p /etc/sudoers.d && echo "work ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers.d/01-bai-work', - ], - user="root", - ) - shell_response = await exec.start(detach=True) - if shell_response: - raise ContainerCreationError( - container_id=cid, - message=f"sudoers provision failed: {shell_response.decode()}", - ) except asyncio.CancelledError: if container is not None: raise ContainerCreationError( - container_id=cid, message="Container creation cancelled" + container_id=container._id, message="Container creation cancelled" ) raise - except Exception: + except Exception as e: # Oops, we have to restore the allocated resources! - scratch_type = self.local_config["container"]["scratch-type"] - scratch_root = self.local_config["container"]["scratch-root"] - if sys.platform.startswith("linux") and scratch_type == "memory": - await destroy_scratch_filesystem(self.scratch_dir) - await destroy_scratch_filesystem(self.tmp_dir) - await loop.run_in_executor(None, shutil.rmtree, self.scratch_dir) - await loop.run_in_executor(None, shutil.rmtree, self.tmp_dir) - elif sys.platform.startswith("linux") and scratch_type == "hostfile": - await destroy_loop_filesystem(scratch_root, self.kernel_id) - else: - await loop.run_in_executor(None, shutil.rmtree, self.scratch_dir) - self.port_pool.update(host_ports) - async with self.resource_lock: - for dev_name, device_alloc in resource_spec.allocations.items(): - self.computers[dev_name].alloc_map.free(device_alloc) + await _rollback_container_creation() if container is not None: - raise ContainerCreationError(container_id=cid, message="unknown") + raise ContainerCreationError( + container_id=container._id, message=f"unknown. {repr(e)}" + ) raise + try: + await container.start() + except asyncio.CancelledError: + await _rollback_container_creation() + raise ContainerCreationError(container_id=cid, message="Container start cancelled") + except Exception as e: + await _rollback_container_creation() + raise ContainerCreationError(container_id=cid, message=f"unknown. 
{repr(e)}") + + if self.internal_data.get("sudo_session_enabled", False): + exec = await container.exec( + [ + # file ownership is guaranteed to be set as root:root since command is executed on behalf of root user + "sh", + "-c", + 'mkdir -p /etc/sudoers.d && echo "work ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers.d/01-bai-work', + ], + user="root", + ) + shell_response = await exec.start(detach=True) + if shell_response: + await _rollback_container_creation() + raise ContainerCreationError( + container_id=cid, + message=f"sudoers provision failed: {shell_response.decode()}", + ) + additional_network_names: Set[str] = set() for dev_name, device_alloc in resource_spec.allocations.items(): n = await self.computers[dev_name].instance.get_docker_networks(device_alloc) @@ -935,7 +970,7 @@ async def start_container( if container_config["HostConfig"].get("NetworkMode") == "host": host_port = host_ports[idx] else: - ports: list[dict[str, Any]] | None = await container.port(port) + ports: list[PortInfo] | None = await container.port(port) if ports is None: raise ContainerCreationError( container_id=cid, message="Container port not found" @@ -1500,24 +1535,12 @@ async def log_iter(): log.warning("container deletion timeout (k:{}, c:{})", kernel_id, container_id) if not restarting: - scratch_type = self.local_config["container"]["scratch-type"] - scratch_root = self.local_config["container"]["scratch-root"] - scratch_dir = scratch_root / str(kernel_id) - tmp_dir = scratch_root / f"{kernel_id}_tmp" - try: - if sys.platform.startswith("linux") and scratch_type == "memory": - await destroy_scratch_filesystem(scratch_dir) - await destroy_scratch_filesystem(tmp_dir) - await loop.run_in_executor(None, shutil.rmtree, scratch_dir) - await loop.run_in_executor(None, shutil.rmtree, tmp_dir) - elif sys.platform.startswith("linux") and scratch_type == "hostfile": - await destroy_loop_filesystem(scratch_root, kernel_id) - else: - await loop.run_in_executor(None, shutil.rmtree, scratch_dir) - except CalledProcessError: - pass - except FileNotFoundError: - pass + await _clean_scratch( + loop, + self.local_config["container"]["scratch-type"], + self.local_config["container"]["scratch-root"], + kernel_id, + ) async def create_local_network(self, network_name: str) -> None: async with closing_async(Docker()) as docker: diff --git a/src/ai/backend/agent/docker/intrinsic.py b/src/ai/backend/agent/docker/intrinsic.py index 2045519b3b5..357f031c858 100644 --- a/src/ai/backend/agent/docker/intrinsic.py +++ b/src/ai/backend/agent/docker/intrinsic.py @@ -53,6 +53,19 @@ log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] +# The list of pruned fstype when checking the filesystem usage statistics. +# Note that psutil's linux implementation automatically filters out "non-device" filesystems by +# checking /proc/filesystems so we don't have to put all the details virtual filesystems like +# "sockfs", "debugfs", etc. +pruned_disk_types = frozenset([ + "vfat", + "lxcfs", + "squashfs", + "tmpfs", + "iso9660", # cdrom +]) + + def netstat_ns_work(ns_path: Path): with nsenter(ns_path): result = psutil.net_io_counters(pernic=True) @@ -90,23 +103,24 @@ async def fetch_api_stats(container: DockerContainer) -> Optional[Dict[str, Any] ) return None else: + entry = {"read": "0001-01-01"} # aiodocker 0.16 or later returns a list of dict, even when not streaming. 
- if isinstance(ret, list): - if not ret: + match ret: + case list() if ret: + entry = ret[0] + case dict() if ret: + entry = ret + case _: # The API may return an empty result upon container termination. + log.warning( + "cannot read stats (cid:{}): got an empty result: {}", + short_cid, + ret, + ) return None - ret = ret[0] - # The API may return an invalid or empty result upon container termination. - if ret is None or not isinstance(ret, dict): - log.warning( - "cannot read stats (cid:{}): got an empty result: {}", - short_cid, - ret, - ) - return None - if ret["read"].startswith("0001-01-01") or ret["preread"].startswith("0001-01-01"): + if entry["read"].startswith("0001-01-01") or entry["preread"].startswith("0001-01-01"): return None - return ret + return entry # Pseudo-plugins for intrinsic devices (CPU and the main memory) @@ -489,20 +503,25 @@ async def gather_node_measures(self, ctx: StatContext) -> Sequence[NodeMeasureme net_tx_bytes = _nstat.bytes_sent def get_disk_stat(): - pruned_disk_types = frozenset(["squashfs", "vfat", "tmpfs"]) total_disk_usage = Decimal(0) total_disk_capacity = Decimal(0) per_disk_stat = {} for disk_info in psutil.disk_partitions(): - if disk_info.fstype not in pruned_disk_types: - if "/var/lib/docker/btrfs" == disk_info.mountpoint: - continue - dstat = os.statvfs(disk_info.mountpoint) - disk_usage = Decimal(dstat.f_frsize * (dstat.f_blocks - dstat.f_bavail)) - disk_capacity = Decimal(dstat.f_frsize * dstat.f_blocks) - per_disk_stat[disk_info.device] = Measurement(disk_usage, disk_capacity) - total_disk_usage += disk_usage - total_disk_capacity += disk_capacity + # Skip additional filesystem types not filtered by psutil, like squashfs. + if disk_info.fstype in pruned_disk_types: + continue + # Skip transient filesystems created/destroyed by Docker. + if disk_info.mountpoint.startswith("/proc/docker/runtime-runc/moby/"): + continue + # Skip btrfs subvolumes used by Docker if configured. 
+ if disk_info.mountpoint == "/var/lib/docker/btrfs": + continue + dstat = os.statvfs(disk_info.mountpoint) + disk_usage = Decimal(dstat.f_frsize * (dstat.f_blocks - dstat.f_bavail)) + disk_capacity = Decimal(dstat.f_frsize * dstat.f_blocks) + per_disk_stat[disk_info.device] = Measurement(disk_usage, disk_capacity) + total_disk_usage += disk_usage + total_disk_capacity += disk_capacity return total_disk_usage, total_disk_capacity, per_disk_stat loop = current_loop() diff --git a/src/ai/backend/agent/docker/kernel.py b/src/ai/backend/agent/docker/kernel.py index 2591f836015..842b78d6cae 100644 --- a/src/ai/backend/agent/docker/kernel.py +++ b/src/ai/backend/agent/docker/kernel.py @@ -1,6 +1,9 @@ +from __future__ import annotations + import asyncio import functools import gzip +import io import logging import lzma import os @@ -9,7 +12,7 @@ import subprocess import textwrap from pathlib import Path, PurePosixPath -from typing import Any, Dict, Final, FrozenSet, Mapping, Optional, Sequence, Tuple +from typing import Any, Dict, Final, FrozenSet, Mapping, Optional, Sequence, Tuple, cast, override import janus import pkg_resources @@ -103,7 +106,7 @@ async def get_logs(self): container_id = self.data["container_id"] async with closing_async(Docker()) as docker: container = await docker.containers.get(container_id) - logs = await container.log(stdout=True, stderr=True) + logs = await container.log(stdout=True, stderr=True, follow=False) return {"logs": "".join(logs)} async def interrupt_kernel(self): @@ -220,7 +223,7 @@ def _write_chunks( else: repo, tag = None, None response: Mapping[str, Any] = await container.commit( - changes=changes, + changes=changes or None, repository=repo, tag=tag, config=config, @@ -258,85 +261,110 @@ def _write_chunks( except asyncio.TimeoutError: log.warning("Session is already being committed.") - async def accept_file(self, filename: str, filedata: bytes): + @override + async def accept_file(self, container_path: os.PathLike | str, filedata: bytes) -> None: loop = current_loop() - work_dir = self.agent_config["container"]["scratch-root"] / str(self.kernel_id) / "work" + container_home_path = PurePosixPath("/home/work") try: - # create intermediate directories in the path - dest_path = (work_dir / filename).resolve(strict=False) - parent_path = dest_path.parent - except ValueError: # parent_path does not start with work_dir! 
- raise AssertionError("malformed upload filename and path.") + home_relpath = PurePosixPath(container_path).relative_to(container_home_path) + except ValueError: + raise PermissionError("Not allowed to upload files outside /home/work") + host_work_dir: Path = ( + self.agent_config["container"]["scratch-root"] / str(self.kernel_id) / "work" + ) + host_abspath = (host_work_dir / home_relpath).resolve(strict=False) + if not host_abspath.is_relative_to(host_work_dir): + raise PermissionError("Not allowed to upload files outside /home/work") def _write_to_disk(): - parent_path.mkdir(parents=True, exist_ok=True) - dest_path.write_bytes(filedata) + host_abspath.parent.mkdir(parents=True, exist_ok=True) + host_abspath.write_bytes(filedata) try: await loop.run_in_executor(None, _write_to_disk) - except FileNotFoundError: - log.error( - "{0}: writing uploaded file failed: {1} -> {2}", self.kernel_id, filename, dest_path + except OSError as e: + raise RuntimeError( + "{0}: writing uploaded file failed: {1} -> {2} ({3})".format( + self.kernel_id, + container_path, + host_abspath, + repr(e), + ) ) - async def download_file(self, filepath: str): + @override + async def download_file(self, container_path: os.PathLike | str) -> bytes: container_id = self.data["container_id"] + + container_home_path = PurePosixPath("/home/work") + container_abspath = PurePosixPath(os.path.normpath(container_home_path / container_path)) + if not container_abspath.is_relative_to(container_home_path): + raise PermissionError("You cannot download files outside /home/work") + async with closing_async(Docker()) as docker: container = docker.containers.container(container_id) - home_path = PurePosixPath("/home/work") - try: - abspath = home_path / filepath - abspath.relative_to(home_path) - except ValueError: - raise PermissionError("You cannot download files outside /home/work") try: - with await container.get_archive(str(abspath)) as tarobj: - tarobj.fileobj.seek(0, 2) - fsize = tarobj.fileobj.tell() - if fsize > 1048576: - raise ValueError("too large file") - tarbytes = tarobj.fileobj.getvalue() + with await container.get_archive(str(container_abspath)) as tarobj: + # FIXME: Replace this API call to a streaming version and cut the download if + # the downloaded size exceeds the limit. 
+ assert tarobj.fileobj is not None + tar_fobj = cast(io.BufferedIOBase, tarobj.fileobj) + tar_fobj.seek(0, io.SEEK_END) + tar_size = tar_fobj.tell() + if tar_size > 1048576: + raise ValueError("Too large archive file exceeding 1 MiB") + tar_fobj.seek(0, io.SEEK_SET) + tarbytes = tar_fobj.read() except DockerError: - log.warning("Could not found the file: {0}", abspath) - raise FileNotFoundError(f"Could not found the file: {abspath}") + raise RuntimeError(f"Could not download the archive to: {container_abspath}") return tarbytes - async def download_single(self, filepath: str): + @override + async def download_single(self, container_path: os.PathLike | str) -> bytes: container_id = self.data["container_id"] + + container_home_path = PurePosixPath("/home/work") + container_abspath = PurePosixPath(os.path.normpath(container_home_path / container_path)) + if not container_abspath.is_relative_to(container_home_path): + raise PermissionError("You cannot download files outside /home/work") + async with closing_async(Docker()) as docker: container = docker.containers.container(container_id) - home_path = PurePosixPath("/home/work") - try: - abspath = home_path / filepath - abspath.relative_to(home_path) - except ValueError: - raise PermissionError("You cannot download files outside /home/work") try: - with await container.get_archive(str(abspath)) as tarobj: - tarobj.fileobj.seek(0, 2) - fsize = tarobj.fileobj.tell() - if fsize > 1048576: - raise ValueError("too large file") - tarobj.fileobj.seek(0) - inner_file = tarobj.extractfile(tarobj.getnames()[0]) - if inner_file: - tarbytes = inner_file.read() - else: - log.warning("Could not found the file: {0}", abspath) - raise FileNotFoundError(f"Could not found the file: {abspath}") + with await container.get_archive(str(container_abspath)) as tarobj: + # FIXME: Replace this API call to a streaming version and cut the download if + # the downloaded size exceeds the limit. + assert tarobj.fileobj is not None + tar_fobj = cast(io.BufferedIOBase, tarobj.fileobj) + tar_fobj.seek(0, io.SEEK_END) + tar_size = tar_fobj.tell() + if tar_size > 1048576: + raise ValueError("Too large archive file exceeding 1 MiB") + tar_fobj.seek(0, io.SEEK_SET) + if len(tarobj.getnames()) > 1: + raise ValueError( + f"Expected a single-file archive but found multiple files from {container_abspath}" + ) + inner_fname = tarobj.getnames()[0] + inner_fobj = tarobj.extractfile(inner_fname) + if not inner_fobj: + raise ValueError( + f"Could not read {inner_fname!r} the archive file {container_abspath}" + ) + # FYI: To get the size of extracted file, seek and tell with inner_fobj. 
+ content_bytes = inner_fobj.read() except DockerError: - log.warning("Could not found the file: {0}", abspath) - raise FileNotFoundError(f"Could not found the file: {abspath}") - return tarbytes + raise RuntimeError(f"Could not download the archive to: {container_abspath}") + return content_bytes - async def list_files(self, container_path: str): + @override + async def list_files(self, container_path: os.PathLike | str): container_id = self.data["container_id"] # Confine the lookable paths in the home directory - home_path = Path("/home/work").resolve() - resolved_path = (home_path / container_path).resolve() - - if str(os.path.commonpath([resolved_path, home_path])) != str(home_path): + container_home_path = PurePosixPath("/home/work") + container_abspath = PurePosixPath(os.path.normpath(container_home_path / container_path)) + if not container_abspath.is_relative_to(container_home_path): raise PermissionError("You cannot list files outside /home/work") # Gather individual file information in the target path. @@ -373,7 +401,7 @@ async def list_files(self, container_path: str): "/opt/backend.ai/bin/python", "-c", code, - str(container_path), + str(container_abspath), ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, diff --git a/src/ai/backend/agent/docker/utils.py b/src/ai/backend/agent/docker/utils.py index 32505323e34..36171fcb638 100644 --- a/src/ai/backend/agent/docker/utils.py +++ b/src/ai/backend/agent/docker/utils.py @@ -132,7 +132,7 @@ async def recreate(self) -> None: pass else: raise - container_config = { + container_config: dict[str, Any] = { "Image": self.image_ref, "Tty": True, "Privileged": False, diff --git a/src/ai/backend/agent/dummy/agent.py b/src/ai/backend/agent/dummy/agent.py index 362c7f55fb7..dfe05bbb1dc 100644 --- a/src/ai/backend/agent/dummy/agent.py +++ b/src/ai/backend/agent/dummy/agent.py @@ -260,6 +260,10 @@ async def pull_image(self, image_ref: ImageRef, registry_conf: ImageRegistry) -> delay = self.dummy_agent_cfg["delay"]["pull-image"] await asyncio.sleep(delay) + async def push_image(self, image_ref: ImageRef, registry_conf: ImageRegistry) -> None: + delay = self.dummy_agent_cfg["delay"]["push-image"] + await asyncio.sleep(delay) + async def check_image( self, image_ref: ImageRef, image_id: str, auto_pull: AutoPullBehavior ) -> bool: diff --git a/src/ai/backend/agent/dummy/kernel.py b/src/ai/backend/agent/dummy/kernel.py index 4d99d343feb..662c12a2dd3 100644 --- a/src/ai/backend/agent/dummy/kernel.py +++ b/src/ai/backend/agent/dummy/kernel.py @@ -1,6 +1,9 @@ +from __future__ import annotations + import asyncio +import os from collections import OrderedDict -from typing import Any, Dict, FrozenSet, Mapping, Sequence +from typing import Any, Dict, FrozenSet, Mapping, Sequence, override from ai.backend.common.docker import ImageRef from ai.backend.common.events import EventProducer @@ -137,21 +140,25 @@ async def get_service_apps(self): "data": [], } - async def accept_file(self, filename, filedata): + @override + async def accept_file(self, container_path: os.PathLike | str, filedata: bytes) -> None: delay = self.dummy_kernel_cfg["delay"]["accept-file"] await asyncio.sleep(delay) - async def download_file(self, filepath): + @override + async def download_file(self, container_path: os.PathLike | str) -> bytes: delay = self.dummy_kernel_cfg["delay"]["download-file"] await asyncio.sleep(delay) return b"" - async def download_single(self, filepath): + @override + async def download_single(self, container_path: os.PathLike | str) -> bytes: delay = 
self.dummy_kernel_cfg["delay"]["download-single"] await asyncio.sleep(delay) return b"" - async def list_files(self, path: str): + @override + async def list_files(self, container_path: os.PathLike | str): delay = self.dummy_kernel_cfg["delay"]["list-files"] await asyncio.sleep(delay) return {"files": "", "errors": "", "abspath": ""} diff --git a/src/ai/backend/agent/kernel.py b/src/ai/backend/agent/kernel.py index b6fb207f830..8d0904cd6f2 100644 --- a/src/ai/backend/agent/kernel.py +++ b/src/ai/backend/agent/kernel.py @@ -6,6 +6,7 @@ import json import logging import math +import os import re import secrets import time @@ -54,7 +55,7 @@ from .exception import UnsupportedBaseDistroError from .resources import KernelResourceSpec -from .types import AgentEventData +from .types import AgentEventData, KernelLifecycleStatus log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] @@ -176,6 +177,7 @@ class AbstractKernel(UserDict, aobject, metaclass=ABCMeta): stats_enabled: bool # FIXME: apply TypedDict to data in Python 3.8 environ: Mapping[str, Any] + status: KernelLifecycleStatus _tasks: Set[asyncio.Task] @@ -212,6 +214,7 @@ def __init__( self.environ = environ self.runner = None self.container_id = None + self.state = KernelLifecycleStatus.PREPARING async def init(self, event_producer: EventProducer) -> None: log.debug( @@ -232,6 +235,9 @@ def __getstate__(self) -> Mapping[str, Any]: return props def __setstate__(self, props) -> None: + # Used when a `Kernel` object is loaded from pickle data. + if "state" not in props: + props["state"] = KernelLifecycleStatus.RUNNING self.__dict__.update(props) # agent_config is set by the pickle.loads() caller. self.clean_event = None @@ -318,19 +324,50 @@ async def get_service_apps(self): raise NotImplementedError @abstractmethod - async def accept_file(self, filename, filedata): + async def accept_file(self, container_path: os.PathLike | str, filedata) -> None: + """ + Put the uploaded file to the designated container path. + The path should be inside /home/work of the container. + A relative path is interpreted as a subpath inside /home/work. + + WARNING: Since the implementations may use the scratch directory mounted as the home + directory inside the container, the file may not be visible inside the container if the + designated home-relative path overlaps with a vfolder mount. + """ raise NotImplementedError @abstractmethod - async def download_file(self, filepath): + async def download_file(self, container_path: os.PathLike | str) -> bytes: + """ + Download the designated path (a single file or an entire directory) as a tar archive. + The path should be inside /home/work of the container. + A relative path is interpreted as a subpath inside /home/work. + The return value is the raw byte stream of the archive itself, and it is the caller's + responsibility to extract the tar archive. + + This API is intended to download a small set of files from the container filesystem. + """ raise NotImplementedError @abstractmethod - async def download_single(self, filepath): + async def download_single(self, container_path: os.PathLike | str) -> bytes: + """ + Download the designated path (a single file) as a tar archive. + The path should be inside /home/work of the container. + A relative path is interpreted as a subpath inside /home/work. + The return value is the content of the file *extracted* from the downloaded archive. + + This API is intended to download a small file from the container filesystem. 
+ """ raise NotImplementedError @abstractmethod - async def list_files(self, path: str): + async def list_files(self, container_path: os.PathLike | str): + """ + List the directory entries of the designated path. + The path should be inside /home/work of the container. + A relative path is interpreted as a subpath inside /home/work. + """ raise NotImplementedError @abstractmethod diff --git a/src/ai/backend/agent/kubernetes/intrinsic.py b/src/ai/backend/agent/kubernetes/intrinsic.py index f895cef093b..ceb8d0f8b66 100644 --- a/src/ai/backend/agent/kubernetes/intrinsic.py +++ b/src/ai/backend/agent/kubernetes/intrinsic.py @@ -55,12 +55,13 @@ async def fetch_api_stats(container: DockerContainer) -> Optional[Dict[str, Any] ) return None else: + entry = {"read": "0001-01-01"} # aiodocker 0.16 or later returns a list of dict, even when not streaming. if isinstance(ret, list): if not ret: # The API may return an empty result upon container termination. return None - ret = ret[0] + entry = ret[0] # The API may return an invalid or empty result upon container termination. if ret is None or not isinstance(ret, dict): log.warning( @@ -69,9 +70,9 @@ async def fetch_api_stats(container: DockerContainer) -> Optional[Dict[str, Any] ret, ) return None - if ret["read"].startswith("0001-01-01") or ret["preread"].startswith("0001-01-01"): + if entry["read"].startswith("0001-01-01") or entry["preread"].startswith("0001-01-01"): return None - return ret + return entry # Pseudo-plugins for intrinsic devices (CPU and the main memory) diff --git a/src/ai/backend/agent/kubernetes/kernel.py b/src/ai/backend/agent/kubernetes/kernel.py index c5759085631..68dfc2bd8cd 100644 --- a/src/ai/backend/agent/kubernetes/kernel.py +++ b/src/ai/backend/agent/kubernetes/kernel.py @@ -4,8 +4,8 @@ import os import shutil import textwrap -from pathlib import Path -from typing import Any, Dict, FrozenSet, Mapping, Optional, Sequence, Tuple +from pathlib import Path, PurePosixPath +from typing import Any, Dict, FrozenSet, Mapping, Optional, Sequence, Tuple, override import pkg_resources import zmq @@ -229,44 +229,53 @@ async def commit( log.error("Committing in Kubernetes is not supported yet.") raise NotImplementedError - async def accept_file(self, filename: str, filedata: bytes): + @override + async def accept_file(self, container_path: os.PathLike | str, filedata: bytes) -> None: loop = current_loop() - work_dir = self.agent_config["container"]["scratch-root"] / str(self.kernel_id) / "work" + container_home_path = PurePosixPath("/home/work") try: - # create intermediate directories in the path - dest_path = (work_dir / filename).resolve(strict=False) - parent_path = dest_path.parent - except ValueError: # parent_path does not start with work_dir! 
- raise AssertionError("malformed upload filename and path.") + home_relpath = PurePosixPath(container_path).relative_to(container_home_path) + except ValueError: + raise PermissionError("Not allowed to upload files outside /home/work") + host_work_dir: Path = ( + self.agent_config["container"]["scratch-root"] / str(self.kernel_id) / "work" + ) + host_abspath = (host_work_dir / home_relpath).resolve(strict=False) + if not host_abspath.is_relative_to(host_work_dir): + raise PermissionError("Not allowed to upload files outside /home/work") def _write_to_disk(): - parent_path.mkdir(parents=True, exist_ok=True) - dest_path.write_bytes(filedata) + host_abspath.parent.mkdir(parents=True, exist_ok=True) + host_abspath.write_bytes(filedata) try: await loop.run_in_executor(None, _write_to_disk) - except FileNotFoundError: - log.error( - "{0}: writing uploaded file failed: {1} -> {2}", self.kernel_id, filename, dest_path + except OSError as e: + raise RuntimeError( + "{0}: writing uploaded file failed: {1} -> {2} ({3})".format( + self.kernel_id, + container_path, + host_abspath, + repr(e), + ) ) - async def download_file(self, filepath: str): + @override + async def download_file(self, container_path: os.PathLike | str) -> bytes: # TODO: Implement file operations with pure Kubernetes API await kube_config.load_kube_config() core_api = kube_client.CoreV1Api() - home_path = Path("/home/work") - try: - abspath = (home_path / filepath).resolve() - abspath.relative_to(home_path) - except ValueError: + container_home_path = PurePosixPath("/home/work") + container_abspath = PurePosixPath(os.path.normpath(container_home_path / container_path)) + if not container_abspath.is_relative_to(container_home_path): raise PermissionError("You cannot download files outside /home/work") async with watch.Watch().stream( core_api.connect_get_namespaced_pod_exec, self.kernel_id, "backend-ai", - command=["tar", "cf", "-", abspath.resolve()], + command=["tar", "cf", "-", container_abspath], stderr=True, stdin=True, stdout=True, @@ -275,24 +284,25 @@ async def download_file(self, filepath: str): ) as stream: async for event in stream: log.debug("stream: {}", event) + # TODO: retrieve the output stream as a bytes buffer + return b"" - return None - - async def download_single(self, filepath: str): + @override + async def download_single(self, container_path: os.PathLike | str) -> bytes: # TODO: Implement download single file operations with pure Kubernetes API log.error("download_single() in the k8s backend is not supported yet.") raise NotImplementedError - async def list_files(self, container_path: str): + @override + async def list_files(self, container_path: os.PathLike | str): # TODO: Implement file operations with pure Kubernetes API await kube_config.load_kube_config() core_api = kube_client.CoreV1Api() # Confine the lookable paths in the home directory - home_path = Path("/home/work").resolve() - resolved_path = (home_path / container_path).resolve() - - if str(os.path.commonpath([resolved_path, home_path])) != str(home_path): + container_home_path = PurePosixPath("/home/work") + container_abspath = PurePosixPath(os.path.normpath(container_home_path / container_path)) + if not container_abspath.is_relative_to(container_home_path): raise PermissionError("You cannot list files outside /home/work") # Gather individual file information in the target path. 
@@ -336,6 +346,7 @@ async def list_files(self, container_path: str): ) as stream: async for event in stream: log.debug("stream: {}", event) + # TODO: retrieve the output stream as a bytes buffer return {"files": "", "errors": "", "abspath": str(container_path)} diff --git a/src/ai/backend/agent/kubernetes/utils.py b/src/ai/backend/agent/kubernetes/utils.py index ae674d3a5e2..c9eddd20d2d 100644 --- a/src/ai/backend/agent/kubernetes/utils.py +++ b/src/ai/backend/agent/kubernetes/utils.py @@ -129,7 +129,7 @@ async def recreate(self) -> None: pass else: raise - container_config = { + container_config: dict[str, Any] = { "Image": self.image_ref, "Tty": True, "Privileged": False, diff --git a/src/ai/backend/agent/server.py b/src/ai/backend/agent/server.py index 74f1a810377..b61da3af548 100644 --- a/src/ai/backend/agent/server.py +++ b/src/ai/backend/agent/server.py @@ -404,7 +404,9 @@ async def __aexit__(self, *exc_info) -> None: # Stop receiving further requests. await self.rpc_server.__aexit__(*exc_info) self.debug_server_task.cancel() - await self.debug_server_task + await asyncio.sleep(0) + if not self.debug_server_task.done(): + await self.debug_server_task await self.agent.shutdown(self._stop_signal) await self.stats_monitor.cleanup() await self.error_monitor.cleanup() @@ -624,6 +626,21 @@ async def execute( ) return result + @rpc_function + @collect_error + async def trigger_batch_execution( + self, + session_id: str, + kernel_id: str, + code: str, + ) -> None: + log.info( + "rpc::trigger_batch_execution(k:{0}, s:{1}, code:{2})", kernel_id, session_id, code + ) + await self.agent.create_batch_execution_task( + SessionId(UUID(session_id)), KernelId(UUID(kernel_id)), code + ) + @rpc_function @collect_error async def start_service( diff --git a/src/ai/backend/agent/types.py b/src/ai/backend/agent/types.py index 730beb291a0..8d7d6ffe018 100644 --- a/src/ai/backend/agent/types.py +++ b/src/ai/backend/agent/types.py @@ -64,6 +64,20 @@ class Container: backend_obj: Any # used to keep the backend-specific data +class KernelLifecycleStatus(enum.StrEnum): + """ + The lifecycle status of `AbstractKernel` object. + + By default, the state of a newly created kernel is `PREPARING`. + The state of a kernel changes from `PREPARING` to `RUNNING` after the kernel starts a container successfully. + It changes from `RUNNING` to `TERMINATING` before destroy kernel. 
+ """ + + PREPARING = enum.auto() + RUNNING = enum.auto() + TERMINATING = enum.auto() + + class LifecycleEvent(enum.IntEnum): DESTROY = 0 CLEAN = 1 diff --git a/src/ai/backend/cli/BUILD b/src/ai/backend/cli/BUILD index ce944baac0d..979bcff8034 100644 --- a/src/ai/backend/cli/BUILD +++ b/src/ai/backend/cli/BUILD @@ -13,6 +13,7 @@ visibility_private_component( "//src/ai/backend/storage/**", "//src/ai/backend/web/**", "//src/ai/backend/install/**", + "//src/ai/backend/wsproxy/**", ], allowed_dependencies=[ "//src/ai/backend/plugin/**", diff --git a/src/ai/backend/client/cli/image.py b/src/ai/backend/client/cli/image.py index 26a62de820c..ec38a7ac1b0 100644 --- a/src/ai/backend/client/cli/image.py +++ b/src/ai/backend/client/cli/image.py @@ -2,7 +2,6 @@ import click -# from ai.backend.client.output.fields import image_fields from ai.backend.cli.main import main from ai.backend.cli.types import ExitCode from ai.backend.client.exceptions import BackendAPIError diff --git a/src/ai/backend/client/cli/session/lifecycle.py b/src/ai/backend/client/cli/session/lifecycle.py index ec5975b73ff..d0c3185df1c 100644 --- a/src/ai/backend/client/cli/session/lifecycle.py +++ b/src/ai/backend/client/cli/session/lifecycle.py @@ -568,6 +568,12 @@ def destroy(session_names, forced, owner, stats, recursive): else: if not has_failure: print_done("Done.") + if forced: + print_warn( + "If you have destroyed a session whose status is one of " + "[`PULLING`, `SCHEDULED`, `PREPARING`, `TERMINATING`, `ERROR`], " + "Manual cleanup of actual containers may be required." + ) if stats: stats = ret.get("stats", None) if ret else None if stats: @@ -742,18 +748,27 @@ def ls(session_id, path): @session.command() @click.argument("session_id", metavar="SESSID") -def logs(session_id): +@click.option( + "-k", + "--kernel", + "--kernel-id", + type=str, + default=None, + help="The target kernel id of logs. Default value is None, in which case logs of a main kernel are fetched.", +) +def logs(session_id, kernel: str | None): """ Shows the full console log of a compute session. \b SESSID: Session ID or its alias given when creating the session. """ + _kernel_id = uuid.UUID(kernel) if kernel is not None else None with Session() as session: try: print_wait("Retrieving live container logs...") - kernel = session.ComputeSession(session_id) - result = kernel.get_logs().get("result") + _session = session.ComputeSession(session_id) + result = _session.get_logs(_kernel_id).get("result") logs = result.get("logs") if "logs" in result else "" print(logs) print_done("End of logs.") diff --git a/src/ai/backend/client/func/session.py b/src/ai/backend/client/func/session.py index 382e196d936..a5dd416fee9 100644 --- a/src/ai/backend/client/func/session.py +++ b/src/ai/backend/client/func/session.py @@ -699,13 +699,15 @@ async def get_info(self): return await resp.json() @api_function - async def get_logs(self): + async def get_logs(self, kernel_id: UUID | None = None): """ Retrieves the console log of the compute session container. 
""" params = {} if self.owner_access_key: params["owner_access_key"] = self.owner_access_key + if kernel_id is not None: + params["kernel_id"] = str(kernel_id) prefix = get_naming(api_session.get().api_version, "path") rqst = Request( "GET", diff --git a/src/ai/backend/common/BUILD b/src/ai/backend/common/BUILD index 07506ddee4c..4cd757e910f 100644 --- a/src/ai/backend/common/BUILD +++ b/src/ai/backend/common/BUILD @@ -18,6 +18,7 @@ visibility_private_component( "//src/ai/backend/test/**", "//src/ai/backend/testutils/**", "//src/ai/backend/install/**", + "//src/ai/backend/wsproxy/**", ], allowed_dependencies=[ "//src/ai/backend/plugin/**", diff --git a/src/ai/backend/common/events.py b/src/ai/backend/common/events.py index 7b4ae3acb61..c6ed9fe0538 100644 --- a/src/ai/backend/common/events.py +++ b/src/ai/backend/common/events.py @@ -235,6 +235,7 @@ class KernelLifecycleEventReason(enum.StrEnum): UNKNOWN = "unknown" USER_REQUESTED = "user-requested" NOT_FOUND_IN_MANAGER = "not-found-in-manager" + CONTAINER_NOT_FOUND = "container-not-found" @classmethod def from_value(cls, value: Optional[str]) -> Optional[KernelLifecycleEventReason]: diff --git a/src/ai/backend/common/redis_helper.py b/src/ai/backend/common/redis_helper.py index 55a27a7aaf1..ec0ffd0e784 100644 --- a/src/ai/backend/common/redis_helper.py +++ b/src/ai/backend/common/redis_helper.py @@ -7,15 +7,13 @@ import time from typing import ( Any, - AsyncIterator, + AsyncGenerator, Awaitable, Callable, - Dict, Mapping, MutableMapping, Optional, Sequence, - Tuple, Union, cast, ) @@ -69,7 +67,7 @@ # "timeout": 20.0, # for redis-py 5.0+ } -_scripts: Dict[str, str] = {} +_scripts: dict[str, str] = {} log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] @@ -78,7 +76,11 @@ class ConnectionNotAvailable(Exception): pass -async def subscribe(channel: PubSub, *, reconnect_poll_interval: float = 0.3) -> AsyncIterator[Any]: +async def subscribe( + channel: PubSub, + *, + reconnect_poll_interval: float = 0.3, +) -> AsyncGenerator[Any, None]: """ An async-generator wrapper for pub-sub channel subscription. It automatically recovers from server shutdowns until explicitly cancelled. @@ -102,9 +104,9 @@ async def _reset_chan(): if message is not None: yield message["data"] except ( - redis.exceptions.ConnectionError, MasterNotFoundError, SlaveNotFoundError, + redis.exceptions.ConnectionError, redis.exceptions.ReadOnlyError, ConnectionResetError, ConnectionNotAvailable, @@ -113,7 +115,7 @@ async def _reset_chan(): await _reset_chan() continue except redis.exceptions.ResponseError as e: - if len(e.args) > 0 and e.args[0].startswith("NOREPLICAS "): + if len(e.args) > 0 and e.args[0].upper().startswith("NOREPLICAS "): await asyncio.sleep(reconnect_poll_interval) await _reset_chan() continue @@ -131,7 +133,7 @@ async def blpop( key: str, *, service_name: Optional[str] = None, -) -> AsyncIterator[Any]: +) -> AsyncGenerator[bytes, None]: """ An async-generator wrapper for blpop (blocking left pop). It automatically recovers from server shutdowns until explicitly cancelled. 
@@ -150,16 +152,16 @@ async def blpop( continue yield raw_msg[1] except ( - redis.exceptions.ConnectionError, MasterNotFoundError, SlaveNotFoundError, + redis.exceptions.ConnectionError, redis.exceptions.ReadOnlyError, ConnectionResetError, ): await asyncio.sleep(reconnect_poll_interval) continue except redis.exceptions.ResponseError as e: - if e.args[0].startswith("NOREPLICAS "): + if e.args[0].upper().startswith("NOREPLICAS "): await asyncio.sleep(reconnect_poll_interval) continue raise @@ -333,7 +335,7 @@ async def read_stream( stream_key: str, *, block_timeout: int = 10_000, # in msec -) -> AsyncIterator[Tuple[bytes, bytes]]: +) -> AsyncGenerator[tuple[bytes, bytes], None]: """ A high-level wrapper for the XREAD command. """ @@ -377,7 +379,7 @@ async def read_stream_by_group( *, autoclaim_idle_timeout: int = 1_000, # in msec block_timeout: int = 10_000, # in msec -) -> AsyncIterator[Tuple[bytes, bytes]]: +) -> AsyncGenerator[tuple[bytes, Any], None]: """ A high-level wrapper for the XREADGROUP command combined with XAUTOCLAIM and XGROUP_CREATE. diff --git a/src/ai/backend/helpers/package.py b/src/ai/backend/helpers/package.py index 9f3e30b14a6..96fa5e0a5e1 100644 --- a/src/ai/backend/helpers/package.py +++ b/src/ai/backend/helpers/package.py @@ -35,7 +35,7 @@ def install(pkgname, force_install=False): if force_install: cmdargs.append("-I") cmdargs.append(pkgname) - subprocess.call(cmdargs) + subprocess.run(cmdargs) sys.stdout.flush() # Ensure the user site directory to be in sys.path diff --git a/src/ai/backend/install/README.md b/src/ai/backend/install/README.md index 4a86c5e3a7b..11234013d1a 100644 --- a/src/ai/backend/install/README.md +++ b/src/ai/backend/install/README.md @@ -19,7 +19,7 @@ First, install the textual-dev package in the `python-default` venv. Open two terminal sessions. In the first one, run: ```shell -dist/export/python/virtualenvs/python-default/3.12.2/bin/textual console +dist/export/python/virtualenvs/python-default/3.12.4/bin/textual console ``` > **Warning** diff --git a/src/ai/backend/install/cli.py b/src/ai/backend/install/cli.py index bd4fa7f6632..9b0530f4d65 100644 --- a/src/ai/backend/install/cli.py +++ b/src/ai/backend/install/cli.py @@ -272,21 +272,22 @@ def compose(self) -> ComposeResult: """ ) ) - with TabPane("Local Proxy", id="local-proxy"): + with TabPane("Local Proxy", id="wsproxy"): yield Markdown( textwrap.dedent( f""" Run the following commands in a separate shell: ```console $ cd {self.install_info.base_path.resolve()} - $ ./backendai-local-proxy + $ ./backendai-wsproxy wsproxy start-server ``` It works if the console output ends with something like: ``` ... - info [manager.js]: Listening on port {service.local_proxy_addr.bind.port}! - info [local_proxy.js]: Proxy is ready: http://{service.local_proxy_addr.face.host}:{service.local_proxy_addr.face.port}/ + 2024-07-03 13:19:44.536 INFO ai.backend.wsproxy.proxy.frontend.http.port [2596460] accepting proxy requests from 0.0.0.0:10200~10300 + 2024-07-03 13:19:44.536 INFO ai.backend.wsproxy.server [2596460] started handling API requests at 0.0.0.0:{service.local_proxy_addr.bind.port} + ... ``` To terminate, send SIGINT or press Ctrl+C in the console. 
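The `ai.backend.helpers.package` tweak above swaps `subprocess.call()` for `subprocess.run()`. Both wait for the child process to finish, but `run()` returns a `CompletedProcess`, so the exit status is easy to inspect if that is ever needed. A small stand-alone sketch; the pip command line below is illustrative, not the helper's exact invocation, and checking the return code is an optional extension rather than part of the diff.

```python
import subprocess
import sys

# Run pip as a child process and wait for it, as the in-container helper does.
cmdargs = [sys.executable, "-m", "pip", "install", "--user", "requests"]
proc = subprocess.run(cmdargs)
if proc.returncode != 0:
    print(f"pip exited with status {proc.returncode}", file=sys.stderr)
```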
diff --git a/src/ai/backend/install/configs/wsproxy.toml b/src/ai/backend/install/configs/wsproxy.toml new file mode 120000 index 00000000000..6bf1de22bad --- /dev/null +++ b/src/ai/backend/install/configs/wsproxy.toml @@ -0,0 +1 @@ +../../../../../configs/wsproxy/halfstack.toml \ No newline at end of file diff --git a/src/ai/backend/install/context.py b/src/ai/backend/install/context.py index cb9144f8ed2..94051f1fe51 100644 --- a/src/ai/backend/install/context.py +++ b/src/ai/backend/install/context.py @@ -5,9 +5,11 @@ import importlib.resources import json import os +import random import re import secrets import shutil +import tempfile from abc import ABCMeta, abstractmethod from contextlib import asynccontextmanager as actxmgr from contextlib import contextmanager @@ -15,7 +17,7 @@ from datetime import datetime from functools import partial from pathlib import Path -from typing import Any, AsyncIterator, Iterator, Sequence +from typing import Any, AsyncIterator, Final, Iterator, Sequence import aiofiles import aiotools @@ -61,6 +63,12 @@ from .widgets import SetupLog current_log: ContextVar[SetupLog] = ContextVar("current_log") +PASSPHRASE_CHARACTER_POOL: Final[list[str]] = ( + [chr(x) for x in range(ord("a"), ord("z") + 1)] + + [chr(x) for x in range(ord("A"), ord("Z") + 1)] + + [chr(x) for x in range(ord("0"), ord("9") + 1)] + + ["*$./"] +) class PostGuide(enum.Enum): @@ -103,11 +111,11 @@ def log_header(self, title: str) -> None: self.log.write(Text.from_markup(f"[bright_green]:green_circle: {title}")) def mangle_pkgname(self, name: str, fat: bool = False) -> str: - # local-proxy does not have fat variant. (It is always fat.) - if fat and name != "local-proxy": - return f"backendai-{name}-fat-{self.os_info.platform}" return f"backendai-{name}-{self.os_info.platform}" + def generate_passphrase(self, len=16) -> str: + return "".join(random.sample(PASSPHRASE_CHARACTER_POOL, len)) + @staticmethod @contextmanager def resource_path(pkg: str, filename: str) -> Iterator[Path]: @@ -263,6 +271,7 @@ async def install_halfstack(self) -> None: await self.run_shell( f""" {sudo} docker compose pull && \\ + {sudo} docker compose up -d --wait backendai-half-db && \\ {sudo} docker compose up -d && \\ {sudo} docker compose ps """, @@ -283,6 +292,23 @@ async def load_fixtures(self) -> None: "ai.backend.install.fixtures", "example-resource-presets.json" ) as path: await self.run_manager_cli(["mgr", "fixture", "populate", str(path)]) + with tempfile.TemporaryDirectory() as tmpdir: + service = self.install_info.service_config + fixture_path = Path(tmpdir) / "fixture.json" + with open(fixture_path, "w") as fw: + fw.write( + json.dumps({ + "__mode": "update", + "scaling_groups": [ + { + "name": "default", + "wsproxy_addr": f"http://{service.local_proxy_addr.face.host}:{service.local_proxy_addr.face.port}", + "wsproxy_api_token": service.wsproxy_api_token, + } + ], + }) + ) + await self.run_manager_cli(["mgr", "fixture", "populate", fixture_path.as_posix()]) async def check_prerequisites(self) -> None: self.os_info = await detect_os() @@ -540,6 +566,23 @@ async def configure_webserver(self) -> None: with conf_path.open("w") as fp: tomlkit.dump(data, fp) + async def configure_wsproxy(self) -> None: + conf_path = self.copy_config("wsproxy.toml") + halfstack = self.install_info.halfstack_config + service = self.install_info.service_config + assert halfstack.redis_addr is not None + with conf_path.open("r") as fp: + data = tomlkit.load(fp) + data["wsproxy"]["bind_host"] = service.local_proxy_addr.bind.host # 
type: ignore + data["wsproxy"]["advertised_host"] = service.local_proxy_addr.face.host # type: ignore + data["wsproxy"]["bind_api_port"] = service.local_proxy_addr.bind.port # type: ignore + data["wsproxy"]["advertised_api_port"] = service.local_proxy_addr.face.port # type: ignore + data["wsproxy"]["jwt_encrypt_key"] = service.wsproxy_jwt_key # type: ignore + data["wsproxy"]["permit_hash_key"] = service.wsproxy_hash_key # type: ignore + data["wsproxy"]["api_secret"] = service.wsproxy_api_token # type: ignore + with conf_path.open("w") as fp: + tomlkit.dump(data, fp) + async def configure_webui(self) -> None: dotenv_path = self.install_info.base_path / ".env" service = self.install_info.service_config @@ -797,6 +840,9 @@ def hydrate_install_info(self) -> InstallInfo: storage_agent_ipc_base_path="ipc/storage-agent", storage_agent_var_base_path="var/storage-agent", vfolder_relpath="vfolder/local/volume1", + wsproxy_hash_key=self.generate_passphrase(), + wsproxy_jwt_key=self.generate_passphrase(), + wsproxy_api_token=self.generate_passphrase(), ) return InstallInfo( version=self.dist_info.version, @@ -835,6 +881,8 @@ async def configure(self) -> None: self.log_header("Configuring webserver and webui...") await self.configure_webserver() await self.configure_webui() + self.log_header("Configuring wsproxy...") + await self.configure_wsproxy() self.log_header("Generating client environ configs...") await self.configure_client() self.log_header("Loading fixtures...") @@ -884,6 +932,9 @@ def hydrate_install_info(self) -> InstallInfo: storage_agent_ipc_base_path="ipc/storage-agent", storage_agent_var_base_path="var/storage-agent", vfolder_relpath="vfolder/local/volume1", + wsproxy_hash_key=self.generate_passphrase(), + wsproxy_jwt_key=self.generate_passphrase(), + wsproxy_api_token=self.generate_passphrase(), ) return InstallInfo( version=self.dist_info.version, @@ -985,7 +1036,7 @@ async def install(self) -> None: tg.create_task(self._fetch_package("agent", vpane)) tg.create_task(self._fetch_package("agent-watcher", vpane)) tg.create_task(self._fetch_package("webserver", vpane)) - tg.create_task(self._fetch_package("local-proxy", vpane)) + tg.create_task(self._fetch_package("wsproxy", vpane)) tg.create_task(self._fetch_package("storage-proxy", vpane)) tg.create_task(self._fetch_package("client", vpane)) # Verify the checksums of the downloaded packages. 
@@ -993,7 +1044,7 @@ async def install(self) -> None: await self._verify_package("agent", fat=False) await self._verify_package("agent-watcher", fat=False) await self._verify_package("webserver", fat=False) - await self._verify_package("local-proxy", fat=False) + await self._verify_package("wsproxy", fat=False) await self._verify_package("storage-proxy", fat=False) await self._verify_package("client", fat=False) case PackageSource.LOCAL_DIR: @@ -1008,9 +1059,7 @@ async def install(self) -> None: await self._install_package( "webserver", vpane, fat=self.dist_info.use_fat_binary ) - await self._install_package( - "local-proxy", vpane, fat=self.dist_info.use_fat_binary - ) + await self._install_package("wsproxy", vpane, fat=self.dist_info.use_fat_binary) await self._install_package( "storage-proxy", vpane, fat=self.dist_info.use_fat_binary ) @@ -1020,7 +1069,7 @@ async def install(self) -> None: await self._verify_package("agent", fat=self.dist_info.use_fat_binary) await self._verify_package("agent-watcher", fat=self.dist_info.use_fat_binary) await self._verify_package("webserver", fat=self.dist_info.use_fat_binary) - await self._verify_package("local-proxy", fat=self.dist_info.use_fat_binary) + await self._verify_package("wsproxy", fat=self.dist_info.use_fat_binary) await self._verify_package("storage-proxy", fat=self.dist_info.use_fat_binary) await self._verify_package("client", fat=self.dist_info.use_fat_binary) finally: @@ -1038,6 +1087,8 @@ async def configure(self) -> None: self.log_header("Configuring webserver and webui...") await self.configure_webserver() await self.configure_webui() + self.log_header("Configuring wsproxy...") + await self.configure_wsproxy() self.log_header("Generating client environ configs...") await self.configure_client() self.log_header("Loading fixtures...") diff --git a/src/ai/backend/install/dev.py b/src/ai/backend/install/dev.py index e57f728ba76..32189f3a28c 100644 --- a/src/ai/backend/install/dev.py +++ b/src/ai/backend/install/dev.py @@ -107,7 +107,7 @@ async def install_editable_webui(ctx: Context) -> None: echo "PROXYBASEHOST=localhost" >> .env echo "PROXYBASEPORT=${WSPROXY_PORT}" >> .env fi - npm i + pnpm i make compile make compile_wsproxy cd ../../../.. 
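For reference, configure_wsproxy() above follows the same tomlkit read-modify-write pattern as the other configure_* steps such as configure_webserver(). A minimal, self-contained sketch of that pattern is shown here, assuming the copied template already contains a [wsproxy] table; the file path and the literal values are placeholders rather than the real halfstack defaults:

    from pathlib import Path

    import tomlkit

    conf_path = Path("wsproxy.toml")
    with conf_path.open("r") as fp:
        data = tomlkit.load(fp)
    # Overwrite the advertised address and the generated secret, mirroring the
    # assignments made in configure_wsproxy().
    data["wsproxy"]["advertised_host"] = "127.0.0.1"
    data["wsproxy"]["advertised_api_port"] = 5050
    data["wsproxy"]["api_secret"] = "example-api-token"
    with conf_path.open("w") as fp:
        tomlkit.dump(data, fp)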
diff --git a/src/ai/backend/install/types.py b/src/ai/backend/install/types.py index 73f460738da..143fe386227 100644 --- a/src/ai/backend/install/types.py +++ b/src/ai/backend/install/types.py @@ -159,3 +159,6 @@ class ServiceConfig: storage_agent_var_base_path: str storage_watcher_addr: ServerAddr vfolder_relpath: str + wsproxy_hash_key: str + wsproxy_jwt_key: str + wsproxy_api_token: str diff --git a/src/ai/backend/manager/api/events.py b/src/ai/backend/manager/api/events.py index 36f758bac36..30c32aca1b0 100644 --- a/src/ai/backend/manager/api/events.py +++ b/src/ai/backend/manager/api/events.py @@ -23,6 +23,7 @@ from aiohttp import web from aiohttp_sse import sse_response from aiotools import adefer +from sqlalchemy.orm import load_only from ai.backend.common import validators as tx from ai.backend.common.events import ( @@ -51,6 +52,7 @@ from ai.backend.common.types import AgentId from ..models import UserRole, groups, kernels +from ..models.session import SessionRow from ..models.utils import execute_with_retry from ..types import Sentinel from .auth import auth_required @@ -275,32 +277,37 @@ async def enqueue_session_creation_status_update( root_ctx: RootContext = app["_root.context"] app_ctx: PrivateContext = app["events.context"] - async def _fetch(): - async with root_ctx.db.begin_readonly() as conn: + async def _fetch() -> SessionRow | None: + async with root_ctx.db.begin_readonly_session() as db_session: query = ( - sa.select([ - kernels.c.id, - kernels.c.session_id, - kernels.c.session_name, - kernels.c.access_key, - kernels.c.domain_name, - kernels.c.group_id, - kernels.c.user_uuid, - ]) - .select_from(kernels) - .where( - (kernels.c.id == event.session_id), - # for the main kernel, kernel ID == session ID + sa.select(SessionRow) + .where(SessionRow.id == event.session_id) + .options( + load_only( + SessionRow.id, + SessionRow.name, + SessionRow.access_key, + SessionRow.domain_name, + SessionRow.group_id, + SessionRow.user_uuid, + ) ) ) - result = await conn.execute(query) - return result.first() + return await db_session.scalar(query) row = await execute_with_retry(_fetch) if row is None: return + row_map = { + "session_id": row.id, + "session_name": row.name, + "domain_name": row.domain_name, + "user_uuid": row.user_uuid, + "group_id": row.group_id, + "access_key": row.access_key, + } for q in app_ctx.session_event_queues: - q.put_nowait((event.name, row._mapping, event.reason, None)) + q.put_nowait((event.name, row_map, event.reason, None)) async def enqueue_session_termination_status_update( @@ -311,32 +318,37 @@ async def enqueue_session_termination_status_update( root_ctx: RootContext = app["_root.context"] app_ctx: PrivateContext = app["events.context"] - async def _fetch(): - async with root_ctx.db.begin_readonly() as conn: + async def _fetch() -> SessionRow | None: + async with root_ctx.db.begin_readonly_session() as db_session: query = ( - sa.select([ - kernels.c.id, - kernels.c.session_id, - kernels.c.session_name, - kernels.c.access_key, - kernels.c.domain_name, - kernels.c.group_id, - kernels.c.user_uuid, - ]) - .select_from(kernels) - .where( - (kernels.c.session_id == event.session_id), - # for the main kernel, kernel ID == session ID + sa.select(SessionRow) + .where(SessionRow.id == event.session_id) + .options( + load_only( + SessionRow.id, + SessionRow.name, + SessionRow.access_key, + SessionRow.domain_name, + SessionRow.group_id, + SessionRow.user_uuid, + ) ) ) - result = await conn.execute(query) - return result.first() + return await 
db_session.scalar(query) row = await execute_with_retry(_fetch) if row is None: return + row_map = { + "session_id": row.id, + "session_name": row.name, + "domain_name": row.domain_name, + "user_uuid": row.user_uuid, + "group_id": row.group_id, + "access_key": row.access_key, + } for q in app_ctx.session_event_queues: - q.put_nowait((event.name, row._mapping, event.reason, None)) + q.put_nowait((event.name, row_map, event.reason, None)) async def enqueue_batch_task_result_update( @@ -347,31 +359,37 @@ async def enqueue_batch_task_result_update( root_ctx: RootContext = app["_root.context"] app_ctx: PrivateContext = app["events.context"] - async def _fetch(): - async with root_ctx.db.begin_readonly() as conn: + async def _fetch() -> SessionRow | None: + async with root_ctx.db.begin_readonly_session() as db_session: query = ( - sa.select([ - kernels.c.id, - kernels.c.session_id, - kernels.c.session_name, - kernels.c.access_key, - kernels.c.domain_name, - kernels.c.group_id, - kernels.c.user_uuid, - ]) - .select_from(kernels) - .where( - (kernels.c.session_id == event.session_id), + sa.select(SessionRow) + .where(SessionRow.id == event.session_id) + .options( + load_only( + SessionRow.id, + SessionRow.name, + SessionRow.access_key, + SessionRow.domain_name, + SessionRow.group_id, + SessionRow.user_uuid, + ) ) ) - result = await conn.execute(query) - return result.first() + return await db_session.scalar(query) row = await execute_with_retry(_fetch) if row is None: return + row_map = { + "session_id": row.id, + "session_name": row.name, + "domain_name": row.domain_name, + "user_uuid": row.user_uuid, + "group_id": row.group_id, + "access_key": row.access_key, + } for q in app_ctx.session_event_queues: - q.put_nowait((event.name, row._mapping, event.reason, event.exit_code)) + q.put_nowait((event.name, row_map, event.reason, event.exit_code)) @attrs.define(slots=True, auto_attribs=True, init=False) diff --git a/src/ai/backend/manager/api/exceptions.py b/src/ai/backend/manager/api/exceptions.py index 3a1166f7ba5..5c291ec562d 100644 --- a/src/ai/backend/manager/api/exceptions.py +++ b/src/ai/backend/manager/api/exceptions.py @@ -16,6 +16,7 @@ from aiohttp import web +from ai.backend.common.json import ExtendedJSONEncoder from ai.backend.common.plugin.hook import HookResult from ..exceptions import AgentError @@ -48,7 +49,7 @@ def __init__(self, extra_msg: str = None, extra_data: Any = None, **kwargs): body["msg"] = extra_msg if extra_data is not None: body["data"] = extra_data - self.body = json.dumps(body).encode() + self.body = json.dumps(body, cls=ExtendedJSONEncoder).encode() def __str__(self): lines = [] @@ -216,6 +217,10 @@ class MainKernelNotFound(ObjectNotFound): object_name = "main kernel" +class KernelNotFound(ObjectNotFound): + object_name = "kernel" + + class EndpointNotFound(ObjectNotFound): object_name = "endpoint" diff --git a/src/ai/backend/manager/api/schema.graphql b/src/ai/backend/manager/api/schema.graphql index 26d42b3d5b1..76df1d941ec 100644 --- a/src/ai/backend/manager/api/schema.graphql +++ b/src/ai/backend/manager/api/schema.graphql @@ -311,6 +311,9 @@ type Domain { type GroupNode implements Node { """The ID of the object""" id: ID! + + """Added in 24.03.7. The undecoded id value stored in DB.""" + row_id: UUID name: String description: String is_active: Boolean @@ -321,6 +324,12 @@ type GroupNode implements Node { allowed_vfolder_hosts: JSONString integration_id: String resource_policy: String + + """Added in 24.03.7. 
One of ['GENERAL', 'MODEL_STORE'].""" + type: String + + """Added in 24.03.7.""" + container_registry: JSONString scaling_groups: [String] user_nodes(filter: String, order: String, offset: Int, before: String, after: String, first: Int, last: Int): UserConnection } @@ -507,6 +516,9 @@ type KeyPair implements Item { last_used: DateTime rate_limit: Int num_queries: Int + + """Added in 24.09.0.""" + rolling_count: Int user: UUID projects: [String] ssh_public_key: String @@ -527,6 +539,9 @@ type VirtualFolder implements Item { group: UUID group_name: String creator: String + + """Added in 24.09.0.""" + domain_name: String unmanaged_path: String usage_mode: String permission: String @@ -693,6 +708,22 @@ type ScalingGroup { scheduler: String scheduler_opts: JSONString use_host_network: Boolean + + """Added in 24.03.7.""" + agent_count_by_status( + """ + Possible states of an agent. Should be one of ['ALIVE', 'LOST', 'RESTARTING', 'TERMINATED']. Default is 'ALIVE'. + """ + status: String = "ALIVE" + ): Int + + """Added in 24.03.7.""" + agent_total_resource_slots_by_status( + """ + Possible states of an agent. Should be one of ['ALIVE', 'LOST', 'RESTARTING', 'TERMINATED']. Default is 'ALIVE'. + """ + status: String = "ALIVE" + ): JSONString } type StorageVolume implements Item { @@ -1005,6 +1036,9 @@ type ModelCard implements Node { id: ID! name: String vfolder: VirtualFolder + + """Added in 24.09.0.""" + vfolder_node: VirtualFolderNode author: String """Human readable name of the model.""" diff --git a/src/ai/backend/manager/api/service.py b/src/ai/backend/manager/api/service.py index 7a784824718..1455f4c2fc8 100644 --- a/src/ai/backend/manager/api/service.py +++ b/src/ai/backend/manager/api/service.py @@ -1195,7 +1195,7 @@ class RuntimeInfoModel(BaseModel): async def list_supported_runtimes(request: web.Request) -> RuntimeInfoModel: return RuntimeInfoModel( runtimes=[ - RuntimeInfo(name=v.name, human_readable_name=MODEL_SERVICE_RUNTIME_PROFILES[v].name) + RuntimeInfo(name=v.value, human_readable_name=MODEL_SERVICE_RUNTIME_PROFILES[v].name) for v in RuntimeVariant ] ) diff --git a/src/ai/backend/manager/api/session.py b/src/ai/backend/manager/api/session.py index 41c7be61128..9446381372d 100644 --- a/src/ai/backend/manager/api/session.py +++ b/src/ai/backend/manager/api/session.py @@ -43,7 +43,7 @@ import trafaret as t from aiohttp import hdrs, web from dateutil.tz import tzutc -from pydantic import BaseModel, Field +from pydantic import AliasChoices, BaseModel, Field from redis.asyncio import Redis from sqlalchemy.orm import noload, selectinload from sqlalchemy.sql.expression import null, true @@ -73,6 +73,7 @@ AgentId, ClusterMode, ImageRegistry, + KernelId, MountPermission, MountTypes, SessionTypes, @@ -1346,8 +1347,9 @@ async def rename_session(request: web.Request, params: Any) -> web.Response: async def destroy(request: web.Request, params: Any) -> web.Response: root_ctx: RootContext = request.app["_root.context"] session_name = request.match_info["session_name"] + user_role = cast(UserRole, request["user"]["role"]) requester_access_key, owner_access_key = await get_access_key_scopes(request, params) - if requester_access_key != owner_access_key and request["user"]["role"] not in ( + if requester_access_key != owner_access_key and user_role not in ( UserRole.ADMIN, UserRole.SUPERADMIN, ): @@ -1395,7 +1397,9 @@ async def destroy(request: web.Request, params: Any) -> web.Response: last_stats = await asyncio.gather( *[ - root_ctx.registry.destroy_session(sess, forced=params["forced"]) + 
root_ctx.registry.destroy_session( + sess, forced=params["forced"], user_role=user_role + ) for sess in sessions if isinstance(sess, SessionRow) ], @@ -1420,6 +1424,7 @@ async def destroy(request: web.Request, params: Any) -> web.Response: last_stat = await root_ctx.registry.destroy_session( session, forced=params["forced"], + user_role=user_role, ) resp = { "stats": last_stat, @@ -2105,19 +2110,36 @@ async def list_files(request: web.Request) -> web.Response: return web.json_response(resp, status=200) +class ContainerLogRequestModel(BaseModel): + owner_access_key: str | None = Field( + validation_alias=AliasChoices("owner_access_key", "ownerAccessKey"), + default=None, + ) + kernel_id: uuid.UUID | None = Field( + validation_alias=AliasChoices("kernel_id", "kernelId"), + description="Target kernel to get container logs.", + default=None, + ) + + @server_status_required(READ_ALLOWED) @auth_required -@check_api_params( - t.Dict({ - t.Key("owner_access_key", default=None): t.Null | t.String, - }) -) -async def get_container_logs(request: web.Request, params: Any) -> web.Response: +@pydantic_params_api_handler(ContainerLogRequestModel) +async def get_container_logs( + request: web.Request, params: ContainerLogRequestModel +) -> web.Response: root_ctx: RootContext = request.app["_root.context"] session_name: str = request.match_info["session_name"] - requester_access_key, owner_access_key = await get_access_key_scopes(request, params) + requester_access_key, owner_access_key = await get_access_key_scopes( + request, {"owner_access_key": params.owner_access_key} + ) + kernel_id = KernelId(params.kernel_id) if params.kernel_id is not None else None log.info( - "GET_CONTAINER_LOG (ak:{}/{}, s:{})", requester_access_key, owner_access_key, session_name + "GET_CONTAINER_LOG (ak:{}/{}, s:{}, k:{})", + requester_access_key, + owner_access_key, + session_name, + kernel_id, ) resp = {"result": {"logs": ""}} async with root_ctx.db.begin_readonly_session() as db_sess: @@ -2126,25 +2148,38 @@ async def get_container_logs(request: web.Request, params: Any) -> web.Response: session_name, owner_access_key, allow_stale=True, - kernel_loading_strategy=KernelLoadingStrategy.MAIN_KERNEL_ONLY, + kernel_loading_strategy=KernelLoadingStrategy.MAIN_KERNEL_ONLY + if kernel_id is None + else KernelLoadingStrategy.ALL_KERNELS, ) - if ( - compute_session.status in DEAD_SESSION_STATUSES - and compute_session.main_kernel.container_log is not None - ): - log.debug("returning log from database record") - resp["result"]["logs"] = compute_session.main_kernel.container_log.decode("utf-8") - return web.json_response(resp, status=200) + + if compute_session.status in DEAD_SESSION_STATUSES: + if kernel_id is not None: + # Get logs from the specific kernel + kernel_row = compute_session.get_kernel_by_id(kernel_id) + kernel_log = kernel_row.container_log + else: + # Get logs from the main kernel + kernel_log = compute_session.main_kernel.container_log + if kernel_log is not None: + # Get logs from database record + log.debug("returning log from database record") + resp["result"]["logs"] = kernel_log.decode("utf-8") + return web.json_response(resp, status=200) + try: registry = root_ctx.registry await registry.increment_session_usage(compute_session) - resp["result"]["logs"] = await registry.get_logs_from_agent(compute_session) + resp["result"]["logs"] = await registry.get_logs_from_agent( + session=compute_session, kernel_id=kernel_id + ) log.debug("returning log from agent") except BackendError: log.exception( - 
"GET_CONTAINER_LOG(ak:{}/{}, s:{}): unexpected error", + "GET_CONTAINER_LOG(ak:{}/{}, kernel_id: {}, s:{}): unexpected error", requester_access_key, owner_access_key, + kernel_id, session_name, ) raise diff --git a/src/ai/backend/manager/api/vfolder.py b/src/ai/backend/manager/api/vfolder.py index e216eb57266..59667f87813 100644 --- a/src/ai/backend/manager/api/vfolder.py +++ b/src/ai/backend/manager/api/vfolder.py @@ -25,6 +25,7 @@ Sequence, Tuple, TypeAlias, + cast, ) import aiohttp @@ -199,6 +200,8 @@ async def resolve_vfolder_rows( request: web.Request, perm: VFolderPermissionSetAlias | VFolderPermission | str, folder_id_or_name: str | uuid.UUID, + *, + allowed_status_set: VFolderStatusSet | None = None, ) -> Sequence[VFolderRow]: """ Checks if the target VFolder exists and is either: @@ -252,6 +255,7 @@ async def resolve_vfolder_rows( extra_invited_vf_conds=invited_perm_cond, extra_vf_user_conds=vf_user_cond, extra_vf_group_conds=vf_group_cond, + allowed_status_set=allowed_status_set, ) if len(entries) == 0: raise VFolderNotFound(extra_data=folder_id_or_name) @@ -388,6 +392,8 @@ async def create(request: web.Request, params: Any) -> web.Response: group_uuid: uuid.UUID | None = None group_type: ProjectType | None = None + max_vfolder_count: int + max_quota_scope_size: int async with root_ctx.db.begin_session() as sess: match group_id_or_name: @@ -402,16 +408,18 @@ async def create(request: web.Request, params: Any) -> web.Response: .options(selectinload(GroupRow.resource_policy_row)) ) result = await sess.execute(query) - group_row = result.scalar() + group_row = cast(GroupRow | None, result.scalar()) + if group_row is None: + raise GroupNotFound(extra_data=group_id_or_name) _gid, max_vfolder_count, max_quota_scope_size = ( - group_row.id, - group_row.resource_policy_row.max_vfolder_count, - group_row.resource_policy_row.max_quota_scope_size, + cast(uuid.UUID | None, group_row.id), + cast(int, group_row.resource_policy_row.max_vfolder_count), + cast(int, group_row.resource_policy_row.max_quota_scope_size), ) if _gid is None: raise GroupNotFound(extra_data=group_id_or_name) group_uuid = _gid - group_type = group_row.type + group_type = cast(ProjectType, group_row.type) case uuid.UUID(): # Check if the group belongs to the current domain. 
log.debug("group_id_or_name(uuid):{}", group_id_or_name) @@ -423,16 +431,18 @@ async def create(request: web.Request, params: Any) -> web.Response: .options(selectinload(GroupRow.resource_policy_row)) ) result = await sess.execute(query) - group_row = result.scalar() + group_row = cast(GroupRow | None, result.scalar()) + if group_row is None: + raise GroupNotFound(extra_data=group_id_or_name) _gid, max_vfolder_count, max_quota_scope_size = ( group_row.id, - group_row.resource_policy_row.max_vfolder_count, - group_row.resource_policy_row.max_quota_scope_size, + cast(int, group_row.resource_policy_row.max_vfolder_count), + cast(int, group_row.resource_policy_row.max_quota_scope_size), ) if _gid is None: raise GroupNotFound(extra_data=group_id_or_name) group_uuid = group_id_or_name - group_type = group_row.type + group_type = cast(ProjectType, group_row.type) case None: query = ( sa.select(UserRow) @@ -442,8 +452,8 @@ async def create(request: web.Request, params: Any) -> web.Response: result = await sess.execute(query) user_row = result.scalar() max_vfolder_count, max_quota_scope_size = ( - user_row.resource_policy_row.max_vfolder_count, - user_row.resource_policy_row.max_quota_scope_size, + cast(int, user_row.resource_policy_row.max_vfolder_count), + cast(int, user_row.resource_policy_row.max_quota_scope_size), ) case _: raise GroupNotFound(extra_data=group_id_or_name) @@ -491,16 +501,27 @@ async def create(request: web.Request, params: Any) -> web.Response: # Check resource policy's max_vfolder_count if max_vfolder_count > 0: - query = ( - sa.select([sa.func.count()]) - .select_from(vfolders) - .where( - (vfolders.c.user == user_uuid) - & ~(vfolders.c.status.in_(HARD_DELETED_VFOLDER_STATUSES)) + if ownership_type == "user": + query = ( + sa.select([sa.func.count()]) + .select_from(vfolders) + .where( + (vfolders.c.user == user_uuid) + & (vfolders.c.status.not_in(HARD_DELETED_VFOLDER_STATUSES)) + ) ) - ) - result = await conn.scalar(query) - if result >= max_vfolder_count and ownership_type == "user": + else: + assert group_uuid is not None + query = ( + sa.select([sa.func.count()]) + .select_from(vfolders) + .where( + (vfolders.c.group == group_uuid) + & (vfolders.c.status.not_in(HARD_DELETED_VFOLDER_STATUSES)) + ) + ) + result = cast(int, await conn.scalar(query)) + if result >= max_vfolder_count: raise InvalidAPIParameters("You cannot create more vfolders.") # DEPRECATED: Limit vfolder size quota if it is larger than max_vfolder_size of the resource policy. 
@@ -723,18 +744,22 @@ async def fetch_exposed_volume_fields( }, ) as (_, storage_resp): storage_reply = await storage_resp.json() + storage_used_bytes = storage_reply[ExposedVolumeInfoField.used_bytes] + storage_capacity_bytes = storage_reply[ExposedVolumeInfoField.capacity_bytes] if show_used: - volume_usage["used"] = storage_reply[ExposedVolumeInfoField.used_bytes] + volume_usage["used"] = storage_used_bytes if show_total: - volume_usage["total"] = storage_reply[ExposedVolumeInfoField.capacity_bytes] + volume_usage["total"] = storage_capacity_bytes if show_percentage: - volume_usage["percentage"] = ( - storage_reply[ExposedVolumeInfoField.used_bytes] - / storage_reply[ExposedVolumeInfoField.capacity_bytes] - ) * 100 + try: + volume_usage["percentage"] = ( + storage_used_bytes / storage_capacity_bytes + ) * 100 + except ZeroDivisionError: + volume_usage["percentage"] = 0 await redis_helper.execute( redis_connection, @@ -2204,9 +2229,10 @@ async def _delete( allowed_vfolder_types: Sequence[str], resource_policy: Mapping[str, Any], ) -> None: - async with root_ctx.db.begin() as conn: + async with root_ctx.db.begin_readonly_session() as db_session: + db_conn = db_session.bind entries = await query_accessible_vfolders( - conn, + db_conn, user_uuid, allow_privileged_access=True, user_role=user_role, @@ -2228,18 +2254,15 @@ async def _delete( raise InvalidAPIParameters("Cannot delete the vfolder that is not owned by myself.") # perform extra check to make sure records of alive model service not removed by foreign key rule if entry["usage_mode"] == VFolderUsageMode.MODEL: - async with root_ctx.db._begin_session(conn) as sess: - live_endpoints = await EndpointRow.list_by_model(sess, entry["id"]) - if ( - len([ - e for e in live_endpoints if e.lifecycle_stage == EndpointLifecycle.CREATED - ]) - > 0 - ): - raise ModelServiceDependencyNotCleared + live_endpoints = await EndpointRow.list_by_model(db_session, entry["id"]) + if ( + len([e for e in live_endpoints if e.lifecycle_stage == EndpointLifecycle.CREATED]) + > 0 + ): + raise ModelServiceDependencyNotCleared folder_host = entry["host"] await ensure_host_permission_allowed( - conn, + db_conn, folder_host, allowed_vfolder_types=allowed_vfolder_types, user_uuid=user_uuid, @@ -2491,10 +2514,6 @@ async def purge(request: web.Request, params: PurgeRequestModel) -> web.Response root_ctx: RootContext = request.app["_root.context"] folder_id = params.vfolder_id access_key = request["keypair"]["access_key"] - domain_name = request["user"]["domain_name"] - user_role = request["user"]["role"] - user_uuid = request["user"]["uuid"] - allowed_vfolder_types = await root_ctx.shared_config.get_vfolder_types() log.info( "VFOLDER.PURGE (email:{}, ak:{}, vf:{})", request["user"]["email"], @@ -2507,33 +2526,19 @@ async def purge(request: web.Request, params: PurgeRequestModel) -> web.Response ): raise InsufficientPrivilege("You are not allowed to purge vfolders") - row = (await resolve_vfolder_rows(request, VFolderPermission.OWNER_PERM, folder_id))[0] + row = ( + await resolve_vfolder_rows( + request, + VFolderPermission.OWNER_PERM, + folder_id, + allowed_status_set=VFolderStatusSet.PURGABLE, + ) + )[0] await check_vfolder_status(row, VFolderStatusSet.PURGABLE) async with root_ctx.db.begin() as conn: - entries = await query_accessible_vfolders( - conn, - user_uuid, - allow_privileged_access=True, - user_role=user_role, - domain_name=domain_name, - allowed_vfolder_types=allowed_vfolder_types, - extra_vf_conds=(vfolders.c.id == folder_id), - ) - if len(entries) > 1: - 
log.error( - "VFOLDER.PURGE(folder id:{}, hosts:{}", - folder_id, - [entry["host"] for entry in entries], - ) - raise TooManyVFoldersFound( - extra_msg="Multiple folders with the same id.", - extra_data=None, - ) - elif len(entries) == 0: - raise InvalidAPIParameters("No such vfolder.") # query_accesible_vfolders returns list - entry = entries[0] + entry = row delete_stmt = sa.delete(vfolders).where(vfolders.c.id == entry["id"]) await conn.execute(delete_stmt) diff --git a/src/ai/backend/manager/cli/__main__.py b/src/ai/backend/manager/cli/__main__.py index f98bc4b2d44..b6dc2fc9c45 100644 --- a/src/ai/backend/manager/cli/__main__.py +++ b/src/ai/backend/manager/cli/__main__.py @@ -133,7 +133,7 @@ def dbshell(cli_ctx: CLIContext, container_name, psql_help, psql_args): ), *psql_args, ] - subprocess.call(cmd) + subprocess.run(cmd) return # Use the container to start the psql client command log.info(f"using the db container {container_name} ...") @@ -150,7 +150,7 @@ def dbshell(cli_ctx: CLIContext, container_name, psql_help, psql_args): local_config["db"]["name"], *psql_args, ] - subprocess.call(cmd) + subprocess.run(cmd) @main.command() diff --git a/src/ai/backend/manager/idle.py b/src/ai/backend/manager/idle.py index d0323857caf..648d30793e4 100644 --- a/src/ai/backend/manager/idle.py +++ b/src/ai/backend/manager/idle.py @@ -823,9 +823,12 @@ def get_time_window(self, policy: Row) -> timedelta: return timedelta(seconds=idle_timeout) return self.time_window - def get_last_collected_key(self, session_id: SessionId) -> str: + def _get_last_collected_key(self, session_id: SessionId) -> str: return f"session.{session_id}.util_last_collected" + def _get_first_collected_key(self, session_id: SessionId) -> str: + return f"session.{session_id}.util_first_collected" + async def check_idleness( self, kernel: Row, @@ -848,7 +851,8 @@ async def check_idleness( unavailable_resources: Set[str] = set() util_series_key = f"session.{session_id}.util_series" - util_last_collected_key = self.get_last_collected_key(session_id) + util_first_collected_key = self._get_first_collected_key(session_id) + util_last_collected_key = self._get_last_collected_key(session_id) # window_size: the length of utilization reports. window_size = int(time_window.total_seconds() / interval) @@ -858,9 +862,12 @@ async def check_idleness( # Wait until the time "interval" is passed after the last udpated time. 
t = await redis_helper.execute(self._redis_live, lambda r: r.time()) util_now: float = t[0] + (t[1] / (10**6)) - raw_util_last_collected = await redis_helper.execute( - self._redis_live, - lambda r: r.get(util_last_collected_key), + raw_util_last_collected = cast( + bytes | None, + await redis_helper.execute( + self._redis_live, + lambda r: r.get(util_last_collected_key), + ), ) util_last_collected: float = ( float(raw_util_last_collected) if raw_util_last_collected else 0.0 @@ -868,6 +875,26 @@ async def check_idleness( if util_now - util_last_collected < interval: return True + raw_util_first_collected = cast( + bytes | None, + await redis_helper.execute( + self._redis_live, + lambda r: r.get(util_first_collected_key), + ), + ) + if raw_util_first_collected is None: + util_first_collected = util_now + await redis_helper.execute( + self._redis_live, + lambda r: r.set( + util_first_collected_key, + f"{util_now:.06f}", + ex=max(86400, int(self.time_window.total_seconds() * 2)), + ), + ) + else: + util_first_collected = float(raw_util_first_collected) + # Report time remaining until the first time window is full as expire time db_now: datetime = await get_db_now(dbconn) kernel_created_at: datetime = kernel["created_at"] @@ -923,14 +950,19 @@ async def check_idleness( except TypeError: util_series = {k: [] for k in self.resource_thresholds.keys()} - not_enough_data = False + do_idle_check: bool = True for k in util_series: util_series[k].append(current_utilizations[k]) if len(util_series[k]) > window_size: util_series[k].pop(0) else: - not_enough_data = True + do_idle_check = False + + # Do not skip idleness-check if the current time passed the time window + if util_now - util_first_collected >= time_window.total_seconds(): + do_idle_check = True + await redis_helper.execute( self._redis_live, lambda r: r.set( @@ -972,7 +1004,7 @@ def _avg(util_list: list[float]) -> float: ), ) - if not_enough_data: + if not do_idle_check: return True # Check over-utilized (not to be collected) resources. diff --git a/src/ai/backend/manager/models/__init__.py b/src/ai/backend/manager/models/__init__.py index a8bd671340a..9e5a32e4c4b 100644 --- a/src/ai/backend/manager/models/__init__.py +++ b/src/ai/backend/manager/models/__init__.py @@ -8,6 +8,7 @@ from . import image as _image from . import kernel as _kernel from . import keypair as _keypair +from . import rbac as _rbac from . import resource_policy as _rpolicy from . import resource_preset as _rpreset from . 
import resource_usage as _rusage @@ -33,6 +34,7 @@ *_user.__all__, *_vfolder.__all__, *_dotfile.__all__, + *_rbac.__all__, *_rusage.__all__, *_rpolicy.__all__, *_rpreset.__all__, @@ -54,6 +56,7 @@ from .image import * # noqa from .kernel import * # noqa from .keypair import * # noqa +from .rbac import * # noqa from .resource_policy import * # noqa from .resource_preset import * # noqa from .resource_usage import * # noqa diff --git a/src/ai/backend/manager/models/acl.py b/src/ai/backend/manager/models/acl.py index 4b64b14e7b9..f57f11470c8 100644 --- a/src/ai/backend/manager/models/acl.py +++ b/src/ai/backend/manager/models/acl.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, List, Mapping, Sequence +from collections.abc import Mapping +from typing import TYPE_CHECKING, Any, List, Sequence import graphene diff --git a/src/ai/backend/manager/models/agent.py b/src/ai/backend/manager/models/agent.py index cd64a3cf78a..b4c9347f7ca 100644 --- a/src/ai/backend/manager/models/agent.py +++ b/src/ai/backend/manager/models/agent.py @@ -2,7 +2,7 @@ import enum import uuid -from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Sequence +from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Sequence, cast import graphene import sqlalchemy as sa @@ -12,7 +12,7 @@ from sqlalchemy.engine.row import Row from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection from sqlalchemy.ext.asyncio import AsyncSession as SASession -from sqlalchemy.orm import relationship +from sqlalchemy.orm import relationship, selectinload, with_loader_criteria from sqlalchemy.sql.expression import false, true from ai.backend.common import msgpack, redis_helper @@ -32,16 +32,16 @@ simple_db_mutate, ) from .group import association_groups_users -from .kernel import AGENT_RESOURCE_OCCUPYING_KERNEL_STATUSES, kernels +from .kernel import AGENT_RESOURCE_OCCUPYING_KERNEL_STATUSES, KernelRow, kernels from .keypair import keypairs from .minilang.ordering import OrderSpecItem, QueryOrderParser from .minilang.queryfilter import FieldSpecItem, QueryFilterParser, enum_field_getter -from .scaling_group import query_allowed_sgroups from .user import UserRole, users if TYPE_CHECKING: from ai.backend.manager.models.gql import GraphQueryContext + __all__: Sequence[str] = ( "agents", "AgentRow", @@ -432,6 +432,8 @@ async def _append_sgroup_from_clause( domain_name: str | None, scaling_group: str | None = None, ) -> sa.sql.Select: + from .scaling_group import query_allowed_sgroups + if scaling_group is not None: query = query.where(agents.c.scaling_group == scaling_group) else: @@ -618,6 +620,28 @@ async def recalc_agent_resource_occupancy(db_conn: SAConnection, agent_id: Agent await db_conn.execute(query) +async def recalc_agent_resource_occupancy_using_orm( + db_session: SASession, agent_id: AgentId +) -> None: + agent_query = ( + sa.select(AgentRow) + .where(AgentRow.id == agent_id) + .options( + selectinload(AgentRow.kernels), + with_loader_criteria( + KernelRow, KernelRow.status.in_(AGENT_RESOURCE_OCCUPYING_KERNEL_STATUSES) + ), + ) + ) + occupied_slots = ResourceSlot() + agent_row = cast(AgentRow, await db_session.scalar(agent_query)) + kernel_rows = cast(list[KernelRow], agent_row.kernels) + for kernel in kernel_rows: + if kernel.status in AGENT_RESOURCE_OCCUPYING_KERNEL_STATUSES: + occupied_slots += kernel.occupied_slots + agent_row.occupied_slots = occupied_slots + + class ModifyAgent(graphene.Mutation): allowed_roles = (UserRole.SUPERADMIN,) diff --git 
a/src/ai/backend/manager/models/alembic/versions/59a622c31820_remove_foreign_key_in_vfolders_user_and_project.py b/src/ai/backend/manager/models/alembic/versions/59a622c31820_remove_foreign_key_in_vfolders_user_and_project.py new file mode 100644 index 00000000000..a3e659e45eb --- /dev/null +++ b/src/ai/backend/manager/models/alembic/versions/59a622c31820_remove_foreign_key_in_vfolders_user_and_project.py @@ -0,0 +1,38 @@ +"""Remove foreign key constraint from vfolders to users and projects + +Revision ID: 59a622c31820 +Revises: fdb2dcdb8811 +Create Date: 2024-07-08 22:54:20.762521 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "59a622c31820" +down_revision = "fdb2dcdb8811" +branch_labels = None +depends_on = None + + +def upgrade(): + op.drop_constraint("fk_vfolders_user_users", "vfolders", type_="foreignkey") + op.drop_constraint("fk_vfolders_group_groups", "vfolders", type_="foreignkey") + op.drop_constraint("ck_vfolders_ownership_type_match_with_user_or_group", "vfolders") + op.drop_constraint("ck_vfolders_either_one_of_user_or_group", "vfolders") + + +def downgrade(): + op.create_foreign_key("fk_vfolders_group_groups", "vfolders", "groups", ["group"], ["id"]) + op.create_foreign_key("fk_vfolders_user_users", "vfolders", "users", ["user"], ["uuid"]) + op.create_check_constraint( + "ck_vfolders_ownership_type_match_with_user_or_group", + "vfolders", + "(ownership_type = 'user' AND \"user\" IS NOT NULL) OR " + "(ownership_type = 'group' AND \"group\" IS NOT NULL)", + ) + op.create_check_constraint( + "ck_vfolders_either_one_of_user_or_group", + "vfolders", + '("user" IS NULL AND "group" IS NOT NULL) OR ("user" IS NOT NULL AND "group" IS NULL)', + ) diff --git a/src/ai/backend/manager/models/endpoint.py b/src/ai/backend/manager/models/endpoint.py index c466b17bff6..061b7d98066 100644 --- a/src/ai/backend/manager/models/endpoint.py +++ b/src/ai/backend/manager/models/endpoint.py @@ -702,7 +702,7 @@ class RuntimeVariantInfo(graphene.ObjectType): @classmethod def from_enum(cls, enum: RuntimeVariant) -> "RuntimeVariantInfo": - return cls(name=enum.name, human_readable_name=MODEL_SERVICE_RUNTIME_PROFILES[enum].name) + return cls(name=enum.value, human_readable_name=MODEL_SERVICE_RUNTIME_PROFILES[enum].name) class Endpoint(graphene.ObjectType): @@ -1107,7 +1107,7 @@ async def _do_mutate() -> ModifyEndpoint: if (_newval := props.runtime_variant) and _newval is not Undefined: try: - endpoint_row.runtime_variant = RuntimeVariant[_newval] + endpoint_row.runtime_variant = RuntimeVariant(_newval) except KeyError: raise InvalidAPIParameters(f"Unsupported runtime {_newval}") diff --git a/src/ai/backend/manager/models/gql.py b/src/ai/backend/manager/models/gql.py index 09ce80c5327..741fba7f542 100644 --- a/src/ai/backend/manager/models/gql.py +++ b/src/ai/backend/manager/models/gql.py @@ -1130,7 +1130,7 @@ async def resolve_images( root: Any, info: graphene.ResolveInfo, *, - is_installed=None, + is_installed: bool | None = None, is_operation=False, image_filters: list[str] | None = None, ) -> Sequence[Image]: @@ -1173,7 +1173,7 @@ async def resolve_images( else: raise InvalidAPIParameters("Unknown client role") if is_installed is not None: - items = [item for item in items if item.installed] + items = [item for item in items if item.installed == is_installed] return items @staticmethod @@ -1305,6 +1305,7 @@ async def resolve_user_node( ): return await UserNode.get_node(info, id) + @privileged_query(UserRole.SUPERADMIN) async def resolve_user_nodes( root: Any, 
info: graphene.ResolveInfo, diff --git a/src/ai/backend/manager/models/group.py b/src/ai/backend/manager/models/group.py index 553007c04ca..48cc1051ece 100644 --- a/src/ai/backend/manager/models/group.py +++ b/src/ai/backend/manager/models/group.py @@ -94,6 +94,13 @@ MAXIMUM_DOTFILE_SIZE = 64 * 1024 # 61 KiB _rx_slug = re.compile(r"^[a-zA-Z0-9]([a-zA-Z0-9._-]*[a-zA-Z0-9])?$") + +class UserRoleInProject(enum.StrEnum): + ADMIN = enum.auto() # TODO: impl project admin + USER = enum.auto() # UserRole.USER is associated as user + NONE = enum.auto() + + association_groups_users = sa.Table( "association_groups_users", mapper_registry.metadata, @@ -197,7 +204,11 @@ class GroupRow(Base): users = relationship("AssocGroupUserRow", back_populates="group") resource_policy_row = relationship("ProjectResourcePolicyRow", back_populates="projects") kernels = relationship("KernelRow", back_populates="group_row") - vfolder_row = relationship("VFolderRow", back_populates="group_row") + vfolder_rows = relationship( + "VFolderRow", + back_populates="group_row", + primaryjoin="GroupRow.id == foreign(VFolderRow.group)", + ) def _build_group_query(cond: sa.sql.BinaryExpression, domain_name: str) -> sa.sql.Select: @@ -807,6 +818,7 @@ class GroupNode(graphene.ObjectType): class Meta: interfaces = (AsyncNode,) + row_id = graphene.UUID(description="Added in 24.03.7. The undecoded id value stored in DB.") name = graphene.String() description = graphene.String() is_active = graphene.Boolean() @@ -817,6 +829,8 @@ class Meta: allowed_vfolder_hosts = graphene.JSONString() integration_id = graphene.String() resource_policy = graphene.String() + type = graphene.String(description=f"Added in 24.03.7. One of {[t.name for t in ProjectType]}.") + container_registry = graphene.JSONString(description="Added in 24.03.7.") scaling_groups = graphene.List( lambda: graphene.String, ) @@ -829,16 +843,19 @@ class Meta: def from_row(cls, row: GroupRow) -> GroupNode: return cls( id=row.id, + row_id=row.id, name=row.name, description=row.description, is_active=row.is_active, created_at=row.created_at, modified_at=row.modified_at, domain_name=row.domain_name, - total_resource_slots=row.total_resource_slots or {}, - allowed_vfolder_hosts=row.allowed_vfolder_hosts or {}, + total_resource_slots=row.total_resource_slots.to_json() or {}, + allowed_vfolder_hosts=row.allowed_vfolder_hosts.to_json() or {}, integration_id=row.integration_id, resource_policy=row.resource_policy, + type=row.type.name, + container_registry=row.container_registry, ) async def resolve_scaling_groups(self, info: graphene.ResolveInfo) -> Sequence[ScalingGroup]: diff --git a/src/ai/backend/manager/models/kernel.py b/src/ai/backend/manager/models/kernel.py index c3f8f88033c..d78ef7c92f3 100644 --- a/src/ai/backend/manager/models/kernel.py +++ b/src/ai/backend/manager/models/kernel.py @@ -4,18 +4,19 @@ import enum import logging import uuid +from collections.abc import Mapping from contextlib import asynccontextmanager as actxmgr from datetime import datetime from typing import ( TYPE_CHECKING, Any, AsyncIterator, - Mapping, Optional, Sequence, Type, TypedDict, TypeVar, + cast, ) import graphene @@ -51,6 +52,7 @@ KernelCreationFailed, KernelDestructionFailed, KernelExecutionFailed, + KernelNotFound, KernelRestartFailed, SessionNotFound, ) @@ -77,7 +79,7 @@ from .minilang.ordering import ColumnMapType, QueryOrderParser from .minilang.queryfilter import FieldSpecType, QueryFilterParser, enum_field_getter from .user import users -from .utils import ExtendedAsyncSAEngine, 
execute_with_retry, sql_json_merge +from .utils import ExtendedAsyncSAEngine, JSONCoalesceExpr, execute_with_retry, sql_json_merge if TYPE_CHECKING: from .gql import GraphQueryContext @@ -189,6 +191,7 @@ class KernelRole(enum.Enum): "get_logs_from_agent": KernelExecutionFailed, "refresh_session": KernelExecutionFailed, "commit_session": KernelExecutionFailed, + "commit_session_to_file": KernelExecutionFailed, } @@ -643,6 +646,62 @@ async def _query(): return await execute_with_retry(_query) + @classmethod + async def get_kernel_to_update_status( + cls, + db_session: SASession, + kernel_id: KernelId, + ) -> KernelRow: + _stmt = sa.select(KernelRow).where(KernelRow.id == kernel_id) + kernel_row = cast(KernelRow | None, await db_session.scalar(_stmt)) + if kernel_row is None: + raise KernelNotFound(f"Kernel not found (id:{kernel_id})") + return kernel_row + + def transit_status( + self, + status: KernelStatus, + status_info: str | None = None, + status_data: Mapping[str, Any] | JSONCoalesceExpr | None = None, + status_changed_at: datetime | None = None, + ) -> bool: + """ + Check whether the transition from a current status to the given status is valid or not. + Set the status if it is valid and return True. + Else, return False. + """ + if status not in KERNEL_STATUS_TRANSITION_MAP[self.status]: + return False + self.set_status(status, status_info, status_data, status_changed_at) + return True + + def set_status( + self, + status: KernelStatus, + status_info: str | None = None, + status_data: Mapping[str, Any] | JSONCoalesceExpr | None = None, + status_changed_at: datetime | None = None, + ) -> None: + """ + Set the status of the kernel. + """ + now = status_changed_at or datetime.now(tzutc()) + if status in (KernelStatus.CANCELLED, KernelStatus.TERMINATED): + self.terminated_at = now + self.status_changed = now + self.status = status + self.status_history = sql_json_merge( + KernelRow.status_history, + (), + { + status.name: now.isoformat(), + }, + ) + if status_info is not None: + self.status_info = status_info + if status_data is not None: + self.status_data = status_data + @classmethod async def set_kernel_status( cls, diff --git a/src/ai/backend/manager/models/keypair.py b/src/ai/backend/manager/models/keypair.py index 8f41ae9531c..a109c2cf2dc 100644 --- a/src/ai/backend/manager/models/keypair.py +++ b/src/ai/backend/manager/models/keypair.py @@ -13,12 +13,14 @@ from cryptography.hazmat.primitives.asymmetric import rsa from dateutil.parser import parse as dtparse from graphene.types.datetime import DateTime as GQLDateTime +from redis.asyncio import Redis from sqlalchemy.engine.row import Row from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection from sqlalchemy.orm import relationship from sqlalchemy.sql.expression import false from ai.backend.common import msgpack, redis_helper +from ai.backend.common.defs import REDIS_RLIM_DB from ai.backend.common.types import AccessKey, SecretKey if TYPE_CHECKING: @@ -167,6 +169,7 @@ class Meta: last_used = GQLDateTime() rate_limit = graphene.Int() num_queries = graphene.Int() + rolling_count = graphene.Int(description="Added in 24.09.0.") user = graphene.UUID() projects = graphene.List(lambda: graphene.String) @@ -228,6 +231,18 @@ async def resolve_num_queries(self, info: graphene.ResolveInfo) -> int: return n return 0 + async def resolve_rolling_count(self, info: graphene.ResolveInfo) -> int: + ctx: GraphQueryContext = info.context + redis_rlim = redis_helper.get_redis_object( + ctx.shared_config.data["redis"], name="ratelimit", 
db=REDIS_RLIM_DB + ) + + async def _zcard(r: Redis): + return await r.zcard(self.access_key) + + ret = await redis_helper.execute(redis_rlim, _zcard) + return int(ret) if ret is not None else 0 + async def resolve_vfolders(self, info: graphene.ResolveInfo) -> Sequence[VirtualFolder]: ctx: GraphQueryContext = info.context loader = ctx.dataloader_manager.get_loader(ctx, "VirtualFolder") diff --git a/src/ai/backend/manager/models/rbac/BUILD b/src/ai/backend/manager/models/rbac/BUILD new file mode 100644 index 00000000000..db46e8d6c97 --- /dev/null +++ b/src/ai/backend/manager/models/rbac/BUILD @@ -0,0 +1 @@ +python_sources() diff --git a/src/ai/backend/manager/models/rbac/__init__.py b/src/ai/backend/manager/models/rbac/__init__.py new file mode 100644 index 00000000000..6108ab8ae5a --- /dev/null +++ b/src/ai/backend/manager/models/rbac/__init__.py @@ -0,0 +1,308 @@ +from __future__ import annotations + +import enum +import uuid +from abc import ABCMeta, abstractmethod +from collections.abc import Mapping +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Generic, Sequence, TypeVar + +import sqlalchemy as sa +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import load_only + +from ..group import AssocGroupUserRow, GroupRow, UserRoleInProject +from ..user import UserRole + +if TYPE_CHECKING: + from ..utils import ExtendedAsyncSAEngine + + +__all__: Sequence[str] = ( + "BasePermission", + "ClientContext", + "DomainScope", + "ProjectScope", + "UserScope", + "StorageHost", + "ImageRegistry", + "ScalingGroup", + "AbstractPermissionContext", + "AbstractPermissionContextBuilder", +) + + +class BasePermission(enum.StrEnum): + pass + + +PermissionType = TypeVar("PermissionType", bound=BasePermission) + + +ProjectContext = Mapping[uuid.UUID, UserRoleInProject] + + +@dataclass +class ClientContext: + db: ExtendedAsyncSAEngine + + domain_name: str + user_id: uuid.UUID + user_role: UserRole + + _domain_project_ctx: Mapping[str, ProjectContext] | None = field(init=False, default=None) + + async def get_accessible_projects_in_domain( + self, db_session: AsyncSession, domain_name: str + ) -> ProjectContext | None: + match self.user_role: + case UserRole.SUPERADMIN | UserRole.MONITOR: + if self._domain_project_ctx is None: + self._domain_project_ctx = {} + if domain_name not in self._domain_project_ctx: + stmt = ( + sa.select(GroupRow) + .where(GroupRow.domain_name == domain_name) + .options(load_only(GroupRow.id)) + ) + self._domain_project_ctx = { + **self._domain_project_ctx, + domain_name: { + row.id: UserRoleInProject.ADMIN + for row in await db_session.scalars(stmt) + }, + } + case UserRole.ADMIN | UserRole.USER: + _project_ctx = await self._get_or_init_project_ctx(db_session) + self._domain_project_ctx = {self.domain_name: _project_ctx} + return self._domain_project_ctx.get(domain_name) + + async def get_user_role_in_project( + self, db_session: AsyncSession, project_id: uuid.UUID + ) -> UserRoleInProject: + match self.user_role: + case UserRole.SUPERADMIN | UserRole.MONITOR: + return UserRoleInProject.ADMIN + case UserRole.ADMIN | UserRole.USER: + _project_ctx = await self._get_or_init_project_ctx(db_session) + return _project_ctx.get(project_id, UserRoleInProject.NONE) + + async def _get_or_init_project_ctx(self, db_session: AsyncSession) -> ProjectContext: + match self.user_role: + case UserRole.SUPERADMIN | UserRole.MONITOR: + # Superadmins and monitors can access to ALL projects in the system. + # Let's not fetch all project data from DB. 
+ return {} + case UserRole.ADMIN: + if ( + self._domain_project_ctx is None + or self.domain_name not in self._domain_project_ctx + ): + stmt = ( + sa.select(GroupRow) + .where(GroupRow.domain_name == self.domain_name) + .options(load_only(GroupRow.id)) + ) + _project_ctx = { + row.id: UserRoleInProject.ADMIN for row in await db_session.scalars(stmt) + } + self._domain_project_ctx = {self.domain_name: _project_ctx} + return self._domain_project_ctx[self.domain_name] + case UserRole.USER: + if ( + self._domain_project_ctx is None + or self.domain_name not in self._domain_project_ctx + ): + stmt = ( + sa.select(AssocGroupUserRow) + .select_from(sa.join(AssocGroupUserRow, GroupRow)) + .where( + (AssocGroupUserRow.user_id == self.user_id) + & (GroupRow.domain_name == self.domain_name) + ) + ) + _project_ctx = { + row.id: UserRoleInProject.USER for row in await db_session.scalars(stmt) + } + self._domain_project_ctx = {self.domain_name: _project_ctx} + return self._domain_project_ctx[self.domain_name] + + +class BaseScope(metaclass=ABCMeta): + @abstractmethod + def __str__(self) -> str: + pass + + +@dataclass(frozen=True) +class DomainScope(BaseScope): + domain_name: str + + def __str__(self) -> str: + return f"Domain(name: {self.domain_name})" + + +@dataclass(frozen=True) +class ProjectScope(BaseScope): + project_id: uuid.UUID + + def __str__(self) -> str: + return f"Project(id: {self.project_id})" + + +@dataclass(frozen=True) +class UserScope(BaseScope): + user_id: uuid.UUID + + def __str__(self) -> str: + return f"User(id: {self.user_id})" + + +# Extra scope is to address some scopes that contain specific object types +# such as registries for images, scaling groups for agents, storage hosts for vfolders etc. +class ExtraScope: + pass + + +@dataclass(frozen=True) +class StorageHost(ExtraScope): + name: str + + +@dataclass(frozen=True) +class ImageRegistry(ExtraScope): + name: str + + +@dataclass(frozen=True) +class ScalingGroup(ExtraScope): + name: str + + +ObjectType = TypeVar("ObjectType") +ObjectIDType = TypeVar("ObjectIDType") + + +@dataclass +class AbstractPermissionContext( + Generic[PermissionType, ObjectType, ObjectIDType], metaclass=ABCMeta +): + """ + Define permissions under given User, Project or Domain scopes. + Each field of this class represents a mapping of ["accessible scope id", "permissions under the scope"]. + For example, `project` field has a mapping of ["accessible project id", "permissions under the project"]. + { + "PROJECT_A_ID": {"READ", "WRITE", "DELETE"} + "PROJECT_B_ID": {"READ"} + } + + `additional` and `overriding` fields have a mapping of ["object id", "permissions applied to the object"]. + `additional` field is used to add permissions to specific objects. It can be used for admins. + `overriding` field is used to address exceptional cases such as permission overriding or cover other scopes(scaling groups or storage hosts etc). 
+ """ + + user_id_to_permission_map: Mapping[uuid.UUID, frozenset[PermissionType]] = field( + default_factory=dict + ) + project_id_to_permission_map: Mapping[uuid.UUID, frozenset[PermissionType]] = field( + default_factory=dict + ) + domain_name_to_permission_map: Mapping[str, frozenset[PermissionType]] = field( + default_factory=dict + ) + + object_id_to_additional_permission_map: Mapping[ObjectIDType, frozenset[PermissionType]] = ( + field(default_factory=dict) + ) + object_id_to_overriding_permission_map: Mapping[ObjectIDType, frozenset[PermissionType]] = ( + field(default_factory=dict) + ) + + def filter_by_permission(self, permission_to_include: PermissionType) -> None: + self.user_id_to_permission_map = { + uid: permissions + for uid, permissions in self.user_id_to_permission_map.items() + if permission_to_include in permissions + } + self.project_id_to_permission_map = { + pid: permissions + for pid, permissions in self.project_id_to_permission_map.items() + if permission_to_include in permissions + } + self.domain_name_to_permission_map = { + dname: permissions + for dname, permissions in self.domain_name_to_permission_map.items() + if permission_to_include in permissions + } + self.object_id_to_additional_permission_map = { + obj_id: permissions + for obj_id, permissions in self.object_id_to_additional_permission_map.items() + if permission_to_include in permissions + } + self.object_id_to_overriding_permission_map = { + obj_id: permissions + for obj_id, permissions in self.object_id_to_overriding_permission_map.items() + if permission_to_include in permissions + } + + @abstractmethod + async def build_query(self) -> sa.sql.Select | None: + pass + + @abstractmethod + async def calculate_final_permission(self, acl_obj: ObjectType) -> frozenset[PermissionType]: + """ + Calculate the final permissions applied to the given object based on the fields in this class. 
+ """ + pass + + +PermissionContextType = TypeVar("PermissionContextType", bound=AbstractPermissionContext) + + +class AbstractPermissionContextBuilder( + Generic[PermissionType, PermissionContextType], metaclass=ABCMeta +): + async def build( + self, + ctx: ClientContext, + target_scope: BaseScope, + *, + permission: PermissionType | None = None, + ) -> PermissionContextType: + match target_scope: + case UserScope(user_id=user_id): + result = await self._build_in_user_scope(ctx, user_id) + case ProjectScope(project_id=project_id): + result = await self._build_in_project_scope(ctx, project_id) + case DomainScope(domain_name=domain_name): + result = await self._build_in_domain_scope(ctx, domain_name) + case _: + raise RuntimeError(f"invalid scope `{target_scope}`") + if permission is not None: + result.filter_by_permission(permission) + return result + + @abstractmethod + async def _build_in_user_scope( + self, + ctx: ClientContext, + user_id: uuid.UUID, + ) -> PermissionContextType: + pass + + @abstractmethod + async def _build_in_project_scope( + self, + ctx: ClientContext, + project_id: uuid.UUID, + ) -> PermissionContextType: + pass + + @abstractmethod + async def _build_in_domain_scope( + self, + ctx: ClientContext, + domain_name: str, + ) -> PermissionContextType: + pass diff --git a/src/ai/backend/manager/models/rbac/exceptions.py b/src/ai/backend/manager/models/rbac/exceptions.py new file mode 100644 index 00000000000..d83195ad193 --- /dev/null +++ b/src/ai/backend/manager/models/rbac/exceptions.py @@ -0,0 +1,6 @@ +class RBACException(Exception): + pass + + +class NotEnoughPermission(RBACException): + pass diff --git a/src/ai/backend/manager/models/scaling_group.py b/src/ai/backend/manager/models/scaling_group.py index 522c8870c53..11b35ad4f09 100644 --- a/src/ai/backend/manager/models/scaling_group.py +++ b/src/ai/backend/manager/models/scaling_group.py @@ -22,12 +22,18 @@ from sqlalchemy.dialects import postgresql as pgsql from sqlalchemy.engine.row import Row from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection -from sqlalchemy.orm import relationship +from sqlalchemy.orm import load_only, relationship from sqlalchemy.sql.expression import true from ai.backend.common import validators as tx -from ai.backend.common.types import AgentSelectionStrategy, JSONSerializableMixin, SessionTypes +from ai.backend.common.types import ( + AgentSelectionStrategy, + JSONSerializableMixin, + ResourceSlot, + SessionTypes, +) +from .agent import AgentStatus from .base import ( Base, IDColumn, @@ -315,6 +321,65 @@ class ScalingGroup(graphene.ObjectType): scheduler_opts = graphene.JSONString() use_host_network = graphene.Boolean() + # Dynamic fields. + agent_count_by_status = graphene.Field( + graphene.Int, + description="Added in 24.03.7.", + status=graphene.String( + default_value=AgentStatus.ALIVE.name, + description=f"Possible states of an agent. Should be one of {[s.name for s in AgentStatus]}. Default is 'ALIVE'.", + ), + ) + + agent_total_resource_slots_by_status = graphene.Field( + graphene.JSONString, + description="Added in 24.03.7.", + status=graphene.String( + default_value=AgentStatus.ALIVE.name, + description=f"Possible states of an agent. Should be one of {[s.name for s in AgentStatus]}. 
Default is 'ALIVE'.", + ), + ) + + async def resolve_agent_count_by_status( + self, info: graphene.ResolveInfo, status: str = AgentStatus.ALIVE.name + ) -> int: + from .agent import Agent + + return await Agent.load_count( + info.context, + raw_status=status, + scaling_group=self.name, + ) + + async def resolve_agent_total_resource_slots_by_status( + self, info: graphene.ResolveInfo, status: str = AgentStatus.ALIVE.name + ) -> Mapping[str, Any]: + from .agent import AgentRow, AgentStatus + + graph_ctx = info.context + async with graph_ctx.db.begin_readonly_session() as db_session: + query_stmt = ( + sa.select(AgentRow) + .where( + (AgentRow.scaling_group == self.name) & (AgentRow.status == AgentStatus[status]) + ) + .options(load_only(AgentRow.occupied_slots, AgentRow.available_slots)) + ) + result = (await db_session.scalars(query_stmt)).all() + agent_rows = cast(list[AgentRow], result) + + total_occupied_slots = ResourceSlot() + total_available_slots = ResourceSlot() + + for agent_row in agent_rows: + total_occupied_slots += agent_row.occupied_slots + total_available_slots += agent_row.available_slots + + return { + "occupied_slots": total_occupied_slots.to_json(), + "available_slots": total_available_slots.to_json(), + } + @classmethod def from_row( cls, diff --git a/src/ai/backend/manager/models/session.py b/src/ai/backend/manager/models/session.py index fc7f7a14ed8..42c7967a2cd 100644 --- a/src/ai/backend/manager/models/session.py +++ b/src/ai/backend/manager/models/session.py @@ -3,18 +3,18 @@ import asyncio import enum import logging +from collections.abc import Iterable, Mapping, Sequence from contextlib import asynccontextmanager as actxmgr +from dataclasses import dataclass, field from datetime import datetime from typing import ( TYPE_CHECKING, Any, AsyncIterator, - Iterable, List, - Mapping, Optional, - Sequence, Union, + cast, ) from uuid import UUID @@ -45,6 +45,7 @@ KernelCreationFailed, KernelDestructionFailed, KernelExecutionFailed, + KernelNotFound, KernelRestartFailed, MainKernelNotFound, SessionNotFound, @@ -73,13 +74,20 @@ from .minilang.ordering import ColumnMapType, QueryOrderParser from .minilang.queryfilter import FieldSpecType, QueryFilterParser, enum_field_getter from .user import UserRow -from .utils import ExtendedAsyncSAEngine, agg_to_array, execute_with_retry, sql_json_merge +from .utils import ( + ExtendedAsyncSAEngine, + JSONCoalesceExpr, + agg_to_array, + execute_with_retry, + sql_json_merge, +) if TYPE_CHECKING: from sqlalchemy.engine import Row from .gql import GraphQueryContext +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] __all__ = ( "determine_session_status", @@ -176,6 +184,8 @@ class SessionStatus(enum.Enum): "get_logs_from_agent": KernelExecutionFailed, "refresh_session": KernelExecutionFailed, "commit_session": KernelExecutionFailed, + "commit_session_to_file": KernelExecutionFailed, + "trigger_batch_execution": KernelExecutionFailed, } @@ -507,6 +517,31 @@ async def _match_sessions_by_name( return result.scalars().all() +COMPUTE_CONCURRENCY_USED_KEY_PREFIX = "keypair.concurrency_used." +SYSTEM_CONCURRENCY_USED_KEY_PREFIX = "keypair.sftp_concurrency_used." 
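agent_total_resource_slots_by_status above folds each agent's occupied and available slots into running totals, which is just repeated ResourceSlot addition. A tiny sketch of that accumulation, assuming ResourceSlot accepts a plain mapping of Decimal values as its dict-like usage above suggests, with made-up slot figures:

    from decimal import Decimal

    from ai.backend.common.types import ResourceSlot

    # Hypothetical per-agent readings; real values come from
    # AgentRow.occupied_slots and AgentRow.available_slots.
    per_agent_occupied = [
        ResourceSlot({"cpu": Decimal("4"), "mem": Decimal("8589934592")}),
        ResourceSlot({"cpu": Decimal("8"), "mem": Decimal("17179869184")}),
    ]

    total_occupied = ResourceSlot()
    for slots in per_agent_occupied:
        total_occupied += slots

    print(total_occupied.to_json())  # e.g. {"cpu": "12", "mem": "25769803776"}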
+ + +@dataclass +class ConcurrencyUsed: + access_key: AccessKey + compute_session_ids: set[SessionId] = field(default_factory=set) + system_session_ids: set[SessionId] = field(default_factory=set) + + @property + def compute_concurrency_used_key(self) -> str: + return f"{COMPUTE_CONCURRENCY_USED_KEY_PREFIX}{self.access_key}" + + @property + def system_concurrency_used_key(self) -> str: + return f"{SYSTEM_CONCURRENCY_USED_KEY_PREFIX}{self.access_key}" + + def to_cnt_map(self) -> Mapping[str, int]: + return { + self.compute_concurrency_used_key: len(self.compute_session_ids), + self.system_concurrency_used_key: len(self.system_session_ids), + } + + class SessionOp(enum.StrEnum): CREATE = "create_session" DESTROY = "destroy_session" @@ -706,6 +741,14 @@ def resource_opts(self) -> dict[str, Any]: def is_private(self) -> bool: return any([kernel.is_private for kernel in self.kernels]) + def get_kernel_by_id(self, kernel_id: KernelId) -> KernelRow: + kerns = tuple(kern for kern in self.kernels if kern.id == kernel_id) + if len(kerns) > 1: + raise TooManyKernelsFound(f"Multiple kernels found (id:{kernel_id}).") + if len(kerns) == 0: + raise KernelNotFound(f"Session has no such kernel (sid:{self.id}, kid:{kernel_id})") + return kerns[0] + def get_kernel_by_cluster_name(self, cluster_name: str) -> KernelRow: kerns = tuple(kern for kern in self.kernels if kern.cluster_name == cluster_name) if len(kerns) > 1: @@ -782,6 +825,76 @@ async def _check_and_update() -> SessionStatus | None: return await execute_with_retry(_check_and_update) + + @classmethod + async def get_session_to_determine_status( + cls, db_session: SASession, session_id: SessionId + ) -> SessionRow: + stmt = ( + sa.select(SessionRow) + .where(SessionRow.id == session_id) + .options( + selectinload(SessionRow.kernels).options( + load_only(KernelRow.status, KernelRow.cluster_role, KernelRow.status_info) + ), + ) + ) + session_row = cast(SessionRow | None, await db_session.scalar(stmt)) + if session_row is None: + raise SessionNotFound(f"Session not found (id:{session_id})") + return session_row + + def determine_and_set_status( + self, + status_info: str | None = None, + status_data: Mapping[str, Any] | JSONCoalesceExpr | None = None, + status_changed_at: datetime | None = None, + ) -> bool: + """ + Determine the current status of a session based on its sibling kernels. + If the determined status is a valid transition from the current status, set it. + Otherwise, do nothing. + Return True if a transition happened, else False. + """ + + determined_status = determine_session_status(self.kernels) + if determined_status not in SESSION_STATUS_TRANSITION_MAP[self.status]: + return False + + self.set_status(determined_status, status_info, status_data, status_changed_at) + return True + + def set_status( + self, + status: SessionStatus, + status_info: str | None = None, + status_data: Mapping[str, Any] | JSONCoalesceExpr | None = None, + status_changed_at: datetime | None = None, + ) -> None: + """ + Set the status of the session. 
+ """ + now = status_changed_at or datetime.now(tzutc()) + if status in (SessionStatus.CANCELLED, SessionStatus.TERMINATED): + self.terminated_at = now + self.status = status + self.status_history = sql_json_merge( + SessionRow.status_history, + (), + { + status.name: now.isoformat(), + }, + ) + if status_data is not None: + self.status_data = status_data + + _status_info: str | None = None + if status_info is None: + _status_info = self.main_kernel.status_info + else: + _status_info = status_info + if _status_info is not None: + self.status_info = _status_info + @staticmethod async def set_session_status( db: ExtendedAsyncSAEngine, diff --git a/src/ai/backend/manager/models/user.py b/src/ai/backend/manager/models/user.py index 0f3783608e6..fa0b503c790 100644 --- a/src/ai/backend/manager/models/user.py +++ b/src/ai/backend/manager/models/user.py @@ -2,7 +2,7 @@ import enum import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, Mapping, Optional, Sequence +from typing import TYPE_CHECKING, Any, Dict, Iterable, Mapping, Optional, Sequence, cast from uuid import UUID, uuid4 import aiotools @@ -186,7 +186,11 @@ class UserRow(Base): main_keypair = relationship("KeyPairRow", foreign_keys=users.c.main_access_key) - vfolder_row = relationship("VFolderRow", back_populates="user_row") + vfolder_rows = relationship( + "VFolderRow", + back_populates="user_row", + primaryjoin="UserRow.uuid == foreign(VFolderRow.user)", + ) class UserGroup(graphene.ObjectType): @@ -662,8 +666,13 @@ async def _post_func(conn: SAConnection, result: Result) -> Row: model_store_query = sa.select([groups.c.id]).where( groups.c.type == ProjectType.MODEL_STORE ) - model_store_gid = (await conn.execute(model_store_query)).first()["id"] - gids_to_join = [*group_ids, model_store_gid] + model_store_project = cast( + dict[str, Any] | None, (await conn.execute(model_store_query)).first() + ) + if model_store_project is not None: + gids_to_join = [*group_ids, model_store_project["id"]] + else: + gids_to_join = group_ids # Add user to groups if group_ids parameter is provided. if gids_to_join: @@ -1395,7 +1404,7 @@ async def get_node(cls, info: graphene.ResolveInfo, id) -> UserNode: "modified_at": ("modified_at", dtparse), "domain_name": ("domain_name", None), "role": ("role", enum_field_getter(UserRole)), - "resource_policy": ("domain_name", None), + "resource_policy": ("resource_policy", None), "allowed_client_ip": ("allowed_client_ip", None), "totp_activated": ("totp_activated", None), "totp_activated_at": ("totp_activated_at", dtparse), diff --git a/src/ai/backend/manager/models/utils.py b/src/ai/backend/manager/models/utils.py index 27034d83cfa..4ad6d2a1952 100644 --- a/src/ai/backend/manager/models/utils.py +++ b/src/ai/backend/manager/models/utils.py @@ -12,8 +12,11 @@ AsyncIterator, Awaitable, Callable, + Concatenate, Mapping, + ParamSpec, Tuple, + TypeAlias, TypeVar, overload, ) @@ -229,11 +232,17 @@ async def advisory_lock(self, lock_id: LockID) -> AsyncIterator[None]: ) +P = ParamSpec("P") +TQueryResult = TypeVar("TQueryResult") + + @overload async def execute_with_txn_retry( - txn_func: Callable[[SASession], Awaitable[TQueryResult]], + txn_func: Callable[Concatenate[SASession, P], Awaitable[TQueryResult]], begin_trx: Callable[..., AbstractAsyncCtxMgr[SASession]], connection: SAConnection, + *args: P.args, + **kwargs: P.kwargs, ) -> TQueryResult: ... @@ -241,19 +250,23 @@ async def execute_with_txn_retry( # including `SASession` and `SAConnection`. 
@overload async def execute_with_txn_retry( # type: ignore[misc] - txn_func: Callable[[SAConnection], Awaitable[TQueryResult]], + txn_func: Callable[Concatenate[SAConnection, P], Awaitable[TQueryResult]], begin_trx: Callable[..., AbstractAsyncCtxMgr[SAConnection]], connection: SAConnection, + *args: P.args, + **kwargs: P.kwargs, ) -> TQueryResult: ... # TODO: Allow `SASession` parameter only, remove type overloading and remove `begin_trx` after migrating Core APIs to ORM APIs. async def execute_with_txn_retry( - txn_func: Callable[[SASession], Awaitable[TQueryResult]] - | Callable[[SAConnection], Awaitable[TQueryResult]], + txn_func: Callable[Concatenate[SASession, P], Awaitable[TQueryResult]] + | Callable[Concatenate[SAConnection, P], Awaitable[TQueryResult]], begin_trx: Callable[..., AbstractAsyncCtxMgr[SASession]] | Callable[..., AbstractAsyncCtxMgr[SAConnection]], connection: SAConnection, + *args: P.args, + **kwargs: P.kwargs, ) -> TQueryResult: """ Execute DB related function by retrying transaction in a given connection. @@ -271,13 +284,13 @@ async def execute_with_txn_retry( retry=retry_if_exception_type(TryAgain), ): with attempt: - async with begin_trx(bind=connection) as session_or_conn: - try: - result = await txn_func(session_or_conn) - except DBAPIError as e: - if is_db_retry_error(e): - raise TryAgain - raise + try: + async with begin_trx(bind=connection) as session_or_conn: + result = await txn_func(session_or_conn, *args, **kwargs) + except DBAPIError as e: + if is_db_retry_error(e): + raise TryAgain + raise except RetryError: raise asyncio.TimeoutError( f"DB serialization failed after {max_attempts} retry transactions" @@ -378,9 +391,6 @@ async def reenter_txn_session( yield sess -TQueryResult = TypeVar("TQueryResult") - - async def execute_with_retry(txn_func: Callable[[], Awaitable[TQueryResult]]) -> TQueryResult: max_attempts = 20 result: TQueryResult | Sentinel = Sentinel.token @@ -403,13 +413,16 @@ async def execute_with_retry(txn_func: Callable[[], Awaitable[TQueryResult]]) -> return result +JSONCoalesceExpr: TypeAlias = sa.sql.elements.BinaryExpression + + def sql_json_merge( col, key: Tuple[str, ...], obj: Mapping[str, Any], *, _depth: int = 0, -): +) -> JSONCoalesceExpr: """ Generate an SQLAlchemy column update expression that merges the given object with the existing object at a specific (nested) key of the given JSONB column, @@ -445,7 +458,7 @@ def sql_json_increment( *, parent_updates: Mapping[str, Any] = None, _depth: int = 0, -): +) -> JSONCoalesceExpr: """ Generate an SQLAlchemy column update expression that increments the value at a specific (nested) key of the given JSONB column, diff --git a/src/ai/backend/manager/models/vfolder.py b/src/ai/backend/manager/models/vfolder.py index 8327cf59376..35e0a40e202 100644 --- a/src/ai/backend/manager/models/vfolder.py +++ b/src/ai/backend/manager/models/vfolder.py @@ -4,9 +4,21 @@ import logging import os.path import uuid +from collections.abc import Container, Mapping +from dataclasses import dataclass from datetime import datetime from pathlib import PurePosixPath -from typing import TYPE_CHECKING, Any, Final, List, Mapping, NamedTuple, Optional, Sequence, cast +from typing import ( + TYPE_CHECKING, + Any, + Final, + List, + NamedTuple, + Optional, + Sequence, + TypeAlias, + cast, +) import aiohttp import aiotools @@ -23,7 +35,7 @@ from sqlalchemy.engine.row import Row from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection from sqlalchemy.ext.asyncio import AsyncSession as SASession -from 
sqlalchemy.orm import load_only, relationship, selectinload +from sqlalchemy.orm import joinedload, load_only, relationship, selectinload from ai.backend.common.bgtask import ProgressReporter from ai.backend.common.config import model_definition_iv @@ -71,11 +83,20 @@ metadata, ) from .gql_relay import AsyncNode, Connection, ConnectionResolverResult -from .group import GroupRow, ProjectType +from .group import GroupRow, ProjectType, UserRoleInProject from .minilang.ordering import OrderSpecItem, QueryOrderParser from .minilang.queryfilter import FieldSpecItem, QueryFilterParser, enum_field_getter +from .rbac import ( + AbstractPermissionContext, + AbstractPermissionContextBuilder, + BasePermission, + BaseScope, + ClientContext, + StorageHost, +) +from .rbac.exceptions import NotEnoughPermission from .session import DEAD_SESSION_STATUSES, SessionRow -from .user import UserRole +from .user import UserRole, UserRow from .utils import ExtendedAsyncSAEngine, execute_with_retry, sql_json_merge if TYPE_CHECKING: @@ -88,6 +109,7 @@ "vfolder_invitations", "vfolder_permissions", "VirtualFolder", + "VFolderRBACPermission", "VFolderOwnershipType", "VFolderInvitationState", "VFolderPermission", @@ -116,6 +138,11 @@ "SOFT_DELETED_VFOLDER_STATUSES", "HARD_DELETED_VFOLDER_STATUSES", "VFolderPermissionSetAlias", + "get_vfolders", + "VFolderWithPermissionSet", + "OWNER_PERMISSIONS", + "PermissionContext", + "PermissionContextBuilder", ) @@ -232,9 +259,10 @@ class VFolderStatusSet(enum.StrEnum): VFolderStatusSet.DELETABLE: { VFolderOperationStatus.READY, }, - # if DELETABLE access status is requested, only DELETE_PENDING operation status is accepted. + # if DELETABLE access status is requested, DELETE_PENDING, DELETE_COMPLETE operation status is accepted. VFolderStatusSet.PURGABLE: { VFolderOperationStatus.DELETE_PENDING, + VFolderOperationStatus.DELETE_COMPLETE, }, VFolderStatusSet.RECOVERABLE: { VFolderOperationStatus.DELETE_PENDING, @@ -308,7 +336,9 @@ class VFolderCloneInfo(NamedTuple): nullable=False, index=True, ), - sa.Column("permission", EnumValueType(VFolderPermission), default=VFolderPermission.READ_WRITE), + sa.Column( + "permission", EnumValueType(VFolderPermission), default=VFolderPermission.READ_WRITE + ), # legacy sa.Column("max_files", sa.Integer(), default=1000), sa.Column("max_size", sa.Integer(), default=None), # in MBytes sa.Column("num_files", sa.Integer(), default=0), @@ -326,8 +356,8 @@ class VFolderCloneInfo(NamedTuple): nullable=False, index=True, ), - sa.Column("user", GUID, sa.ForeignKey("users.uuid"), nullable=True), # owner if user vfolder - sa.Column("group", GUID, sa.ForeignKey("groups.id"), nullable=True), # owner if project vfolder + sa.Column("user", GUID, nullable=True), # owner if user vfolder + sa.Column("group", GUID, nullable=True), # owner if project vfolder sa.Column("cloneable", sa.Boolean, default=False, nullable=False), sa.Column( "status", @@ -346,15 +376,6 @@ class VFolderCloneInfo(NamedTuple): # } sa.Column("status_history", pgsql.JSONB(), nullable=True, default=sa.null()), sa.Column("status_changed", sa.DateTime(timezone=True), nullable=True, index=True), - sa.CheckConstraint( - "(ownership_type = 'user' AND \"user\" IS NOT NULL) OR " - "(ownership_type = 'group' AND \"group\" IS NOT NULL)", - name="ownership_type_match_with_user_or_group", - ), - sa.CheckConstraint( - '("user" IS NULL AND "group" IS NOT NULL) OR ("user" IS NOT NULL AND "group" IS NULL)', - name="either_one_of_user_or_group", - ), ) @@ -426,17 +447,25 @@ class VFolderRow(Base): __table__ = 
vfolders endpoints = relationship("EndpointRow", back_populates="model_row") - user_row = relationship("UserRow", back_populates="vfolder_row") - group_row = relationship("GroupRow", back_populates="vfolder_row") + user_row = relationship( + "UserRow", + back_populates="vfolder_rows", + primaryjoin="UserRow.uuid == foreign(VFolderRow.user)", + ) + group_row = relationship( + "GroupRow", + back_populates="vfolder_rows", + primaryjoin="GroupRow.id == foreign(VFolderRow.group)", + ) @classmethod async def get( cls, session: SASession, id: uuid.UUID, - load_user=False, - load_group=False, - ) -> "VFolderRow": + load_user: bool = False, + load_group: bool = False, + ) -> VFolderRow: query = sa.select(VFolderRow).where(VFolderRow.id == id) if load_user: query = query.options(selectinload(VFolderRow.user_row)) @@ -484,6 +513,7 @@ async def query_accessible_vfolders( extra_invited_vf_conds=None, extra_vf_user_conds=None, extra_vf_group_conds=None, + allowed_status_set: VFolderStatusSet | None = None, ) -> Sequence[Mapping[str, Any]]: from ai.backend.manager.models import association_groups_users as agus from ai.backend.manager.models import groups, users @@ -555,11 +585,15 @@ async def _append_entries(_query, _is_owner=True): if "user" in allowed_vfolder_types: # Scan vfolders on requester's behalf. j = vfolders.join(users, vfolders.c.user == users.c.uuid) - query = ( - sa.select(vfolders_selectors + [vfolders.c.permission, users.c.email], use_labels=True) - .select_from(j) - .where(vfolders.c.status.not_in(vfolder_status_map[VFolderStatusSet.INACCESSIBLE])) - ) + query = sa.select( + vfolders_selectors + [vfolders.c.permission, users.c.email], use_labels=True + ).select_from(j) + if allowed_status_set is not None: + query = query.where(vfolders.c.status.in_(vfolder_status_map[allowed_status_set])) + else: + query = query.where( + vfolders.c.status.not_in(vfolder_status_map[VFolderStatusSet.INACCESSIBLE]) + ) if not allow_privileged_access or ( user_role != UserRole.ADMIN and user_role != UserRole.SUPERADMIN ): @@ -585,9 +619,14 @@ async def _append_entries(_query, _is_owner=True): .where( (vfolder_permissions.c.user == user_uuid) & (vfolders.c.ownership_type == VFolderOwnershipType.USER) - & (vfolders.c.status.not_in(vfolder_status_map[VFolderStatusSet.INACCESSIBLE])), ) ) + if allowed_status_set is not None: + query = query.where(vfolders.c.status.in_(vfolder_status_map[allowed_status_set])) + else: + query = query.where( + vfolders.c.status.not_in(vfolder_status_map[VFolderStatusSet.INACCESSIBLE]) + ) if extra_invited_vf_conds is not None: query = query.where(extra_invited_vf_conds) await _append_entries(query, _is_owner=False) @@ -631,12 +670,14 @@ async def _append_entries(_query, _is_owner=True): query = ( sa.select(vfolder_permissions.c.permission, vfolder_permissions.c.vfolder) .select_from(j) - .where( - (vfolders.c.group.in_(group_ids)) - & (vfolder_permissions.c.user == user_uuid) - & (vfolders.c.status.not_in(vfolder_status_map[VFolderStatusSet.INACCESSIBLE])), - ) + .where((vfolders.c.group.in_(group_ids)) & (vfolder_permissions.c.user == user_uuid)) ) + if allowed_status_set is not None: + query = query.where(vfolders.c.status.in_(vfolder_status_map[allowed_status_set])) + else: + query = query.where( + vfolders.c.status.not_in(vfolder_status_map[VFolderStatusSet.INACCESSIBLE]) + ) if extra_vf_conds is not None: query = query.where(extra_vf_conds) if extra_vf_user_conds is not None: @@ -653,6 +694,497 @@ async def _append_entries(_query, _is_owner=True): return entries +class 
VFolderRBACPermission(BasePermission): + # Only owners can do + CLONE = enum.auto() + ASSIGN_PERMISSION_TO_OTHERS = enum.auto() # Invite, share + + # `create_vfolder` action should be in {Domain, Project, or User} permissions, not here + READ_ATTRIBUTE = enum.auto() + UPDATE_ATTRIBUTE = enum.auto() + DELETE_VFOLDER = enum.auto() + + READ_CONTENT = enum.auto() + WRITE_CONTENT = enum.auto() + DELETE_CONTENT = enum.auto() + + MOUNT_RO = enum.auto() + MOUNT_RW = enum.auto() + MOUNT_WD = enum.auto() + + +WhereClauseType: TypeAlias = ( + sa.sql.expression.BinaryExpression | sa.sql.expression.BooleanClauseList +) +# TypeAlias is deprecated since 3.12 + +OWNER_PERMISSIONS: frozenset[VFolderRBACPermission] = frozenset([ + perm for perm in VFolderRBACPermission +]) +ADMIN_PERMISSIONS: frozenset[VFolderRBACPermission] = frozenset([ + VFolderRBACPermission.READ_ATTRIBUTE, + VFolderRBACPermission.UPDATE_ATTRIBUTE, + VFolderRBACPermission.DELETE_VFOLDER, +]) +ADMIN_PERMISSIONS_ON_OTHER_USER_INVITED_FOLDERS: frozenset[VFolderRBACPermission] = frozenset([ + VFolderRBACPermission.READ_ATTRIBUTE, +]) # Admins are allowed to READ folders that other users are invited to. +USER_PERMISSIONS_ON_PROJECT_FOLDERS: frozenset[VFolderRBACPermission] = frozenset([ + VFolderRBACPermission.READ_ATTRIBUTE, + VFolderRBACPermission.READ_CONTENT, + VFolderRBACPermission.WRITE_CONTENT, + VFolderRBACPermission.DELETE_CONTENT, + VFolderRBACPermission.MOUNT_RO, + VFolderRBACPermission.MOUNT_RW, + VFolderRBACPermission.MOUNT_WD, +]) +# `ADMIN_PERMISSIONS_ON_PROJECT_FOLDERS == OWNER_PERMISSIONS` is true +# but it doesn't mean that admins are the owner of the project folders. +ADMIN_PERMISSIONS_ON_PROJECT_FOLDERS: frozenset[VFolderRBACPermission] = ( + ADMIN_PERMISSIONS + | USER_PERMISSIONS_ON_PROJECT_FOLDERS + | {VFolderRBACPermission.CLONE, VFolderRBACPermission.ASSIGN_PERMISSION_TO_OTHERS} +) + +# TODO: Change type of `vfolder_permissions.permission` to VFolderRBACPermission +PERMISSION_TO_RBAC_PERMISSION_MAP: Mapping[VFolderPermission, frozenset[VFolderRBACPermission]] = { + VFolderPermission.READ_ONLY: frozenset([ + VFolderRBACPermission.READ_ATTRIBUTE, + VFolderRBACPermission.READ_CONTENT, + ]), + VFolderPermission.READ_WRITE: frozenset([ + VFolderRBACPermission.READ_ATTRIBUTE, + VFolderRBACPermission.UPDATE_ATTRIBUTE, + VFolderRBACPermission.DELETE_VFOLDER, + VFolderRBACPermission.READ_CONTENT, + VFolderRBACPermission.WRITE_CONTENT, + VFolderRBACPermission.DELETE_CONTENT, + VFolderRBACPermission.MOUNT_RO, + VFolderRBACPermission.MOUNT_RW, + ]), + VFolderPermission.RW_DELETE: frozenset([ + VFolderRBACPermission.READ_ATTRIBUTE, + VFolderRBACPermission.UPDATE_ATTRIBUTE, + VFolderRBACPermission.DELETE_VFOLDER, + VFolderRBACPermission.READ_CONTENT, + VFolderRBACPermission.WRITE_CONTENT, + VFolderRBACPermission.DELETE_CONTENT, + VFolderRBACPermission.MOUNT_RO, + VFolderRBACPermission.MOUNT_RW, + VFolderRBACPermission.MOUNT_WD, + ]), + VFolderPermission.OWNER_PERM: OWNER_PERMISSIONS, +} + + +@dataclass +class PermissionContext(AbstractPermissionContext[VFolderRBACPermission, VFolderRow, uuid.UUID]): + @property + def query_condition(self) -> WhereClauseType | None: + cond: WhereClauseType | None = None + + def _OR_coalesce( + base_cond: WhereClauseType | None, + _cond: sa.sql.expression.BinaryExpression, + ) -> WhereClauseType: + return base_cond | _cond if base_cond is not None else _cond + + def _AND_coalesce( + base_cond: WhereClauseType | None, + _cond: sa.sql.expression.BinaryExpression, + ) -> WhereClauseType: + return 
base_cond & _cond if base_cond is not None else _cond + + if self.user_id_to_permission_map: + cond = _OR_coalesce(cond, VFolderRow.user.in_(self.user_id_to_permission_map.keys())) + if self.project_id_to_permission_map: + cond = _OR_coalesce( + cond, VFolderRow.group.in_(self.project_id_to_permission_map.keys()) + ) + if self.domain_name_to_permission_map: + cond = _OR_coalesce( + cond, VFolderRow.domain_name.in_(self.domain_name_to_permission_map.keys()) + ) + if self.object_id_to_additional_permission_map: + cond = _OR_coalesce( + cond, VFolderRow.id.in_(self.object_id_to_additional_permission_map.keys()) + ) + if self.object_id_to_overriding_permission_map: + cond = _OR_coalesce( + cond, VFolderRow.id.in_(self.object_id_to_overriding_permission_map.keys()) + ) + + return cond + + async def build_query(self) -> sa.sql.Select | None: + cond = self.query_condition + if cond is None: + return None + return sa.select(VFolderRow).where(cond) + + async def calculate_final_permission( + self, acl_obj: VFolderRow + ) -> frozenset[VFolderRBACPermission]: + vfolder_row = acl_obj + vfolder_id = cast(uuid.UUID, vfolder_row.id) + if ( + overriding_perm := self.object_id_to_overriding_permission_map.get(vfolder_id) + ) is not None: + return overriding_perm + permissions: set[VFolderRBACPermission] = set() + permissions |= self.object_id_to_additional_permission_map.get(vfolder_id, set()) + permissions |= self.user_id_to_permission_map.get(vfolder_row.user, set()) + permissions |= self.project_id_to_permission_map.get(vfolder_row.group, set()) + permissions |= self.domain_name_to_permission_map.get(vfolder_row.domain_name, set()) + return frozenset(permissions) + + +class PermissionContextBuilder( + AbstractPermissionContextBuilder[VFolderRBACPermission, PermissionContext] +): + db_session: SASession + + def __init__(self, db_session: SASession) -> None: + self.db_session = db_session + + async def _build_in_user_scope( + self, + ctx: ClientContext, + user_id: uuid.UUID, + ) -> PermissionContext: + match ctx.user_role: + case UserRole.SUPERADMIN | UserRole.MONITOR: + if ctx.user_id == user_id: + additional_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & ( + VFolderRow.ownership_type == VFolderOwnershipType.USER + ) # filter out project vfolders + ) + ) + object_id_to_additional_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(additional_stmt) + } + user_id_to_permission_map = {user_id: OWNER_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + object_id_to_additional_permission_map=object_id_to_additional_permission_map, + ) + else: + additional_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & ( + VFolderRow.ownership_type == VFolderOwnershipType.USER + ) # filter out project vfolders + ) + ) + object_id_to_additional_permission_map = { + row.vfolder: ADMIN_PERMISSIONS_ON_OTHER_USER_INVITED_FOLDERS + for row in await self.db_session.scalars(additional_stmt) + } + user_id_to_permission_map = {user_id: ADMIN_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + object_id_to_additional_permission_map=object_id_to_additional_permission_map, + ) + case UserRole.ADMIN: + if ctx.user_id == user_id: + additional_stmt = ( + sa.select(VFolderPermissionRow) + 
.select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & ( + VFolderRow.ownership_type == VFolderOwnershipType.USER + ) # filter out project vfolders + ) + ) + object_id_to_additional_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(additional_stmt) + } + + user_id_to_permission_map = {user_id: OWNER_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + object_id_to_additional_permission_map=object_id_to_additional_permission_map, + ) + else: + # domain admins cannot access to users in another domain + user_domain_stmt = ( + sa.select(UserRow) + .where(UserRow.uuid == user_id) + .options(load_only(UserRow.domain_name)) + ) + user_row = cast(UserRow | None, await self.db_session.scalar(user_domain_stmt)) + if user_row is None: + return PermissionContext() + if user_row.domain_name != ctx.domain_name: + return PermissionContext() + additional_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & ( + VFolderRow.ownership_type == VFolderOwnershipType.USER + ) # filter out project vfolders + ) + ) + object_id_to_additional_permission_map = { + row.vfolder: ADMIN_PERMISSIONS_ON_OTHER_USER_INVITED_FOLDERS + for row in await self.db_session.scalars(additional_stmt) + } + user_id_to_permission_map = {user_id: ADMIN_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + object_id_to_additional_permission_map=object_id_to_additional_permission_map, + ) + case UserRole.USER: + if ctx.user_id == user_id: + overriding_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & ( + VFolderRow.ownership_type == VFolderOwnershipType.USER + ) # filter out project vfolders + ) + ) + object_id_to_overriding_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(overriding_stmt) + } + + user_id_to_permission_map = {ctx.user_id: OWNER_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + object_id_to_overriding_permission_map=object_id_to_overriding_permission_map, + ) + else: + return PermissionContext() + + async def _build_in_project_scope( + self, + ctx: ClientContext, + project_id: uuid.UUID, + ) -> PermissionContext: + role_in_project = await ctx.get_user_role_in_project(self.db_session, project_id) + match role_in_project: + case UserRoleInProject.ADMIN: + project_id_to_permission_map = {project_id: ADMIN_PERMISSIONS_ON_PROJECT_FOLDERS} + return PermissionContext(project_id_to_permission_map=project_id_to_permission_map) + case UserRoleInProject.USER: + project_id_to_permission_map = {project_id: USER_PERMISSIONS_ON_PROJECT_FOLDERS} + overriding_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & (VFolderRow.group == project_id) + ) + ) + object_id_to_overriding_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(overriding_stmt) + } + return PermissionContext( + project_id_to_permission_map=project_id_to_permission_map, + object_id_to_overriding_permission_map=object_id_to_overriding_permission_map, + ) + case UserRoleInProject.NONE: + return PermissionContext() + + 
async def _build_in_domain_scope( + self, + ctx: ClientContext, + domain_name: str, + ) -> PermissionContext: + match ctx.user_role: + case UserRole.SUPERADMIN | UserRole.MONITOR: + domain_name_to_permission_map = {domain_name: ADMIN_PERMISSIONS} + + project_ctx = await ctx.get_accessible_projects_in_domain( + self.db_session, domain_name + ) + if project_ctx is not None: + project_id_to_permission_map = { + project_id: ADMIN_PERMISSIONS_ON_PROJECT_FOLDERS + for project_id, _ in project_ctx.items() + } + else: + project_id_to_permission_map = {} + additional_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & (VFolderRow.domain_name == domain_name) + ) + ) + object_id_to_additional_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(additional_stmt) + } + + user_id_to_permission_map = {ctx.user_id: OWNER_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + project_id_to_permission_map, + domain_name_to_permission_map, + object_id_to_additional_permission_map=object_id_to_additional_permission_map, + ) + case UserRole.ADMIN: + if ctx.domain_name == domain_name: + domain_name_to_permission_map = {domain_name: ADMIN_PERMISSIONS} + project_ctx = await ctx.get_accessible_projects_in_domain( + self.db_session, domain_name + ) + if project_ctx is not None: + project_id_to_permission_map = { + project_id: ADMIN_PERMISSIONS_ON_PROJECT_FOLDERS + for project_id, _ in project_ctx.items() + } + else: + project_id_to_permission_map = {} + additional_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & (VFolderRow.domain_name == domain_name) + ) + ) + object_id_to_additional_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(additional_stmt) + } + + user_id_to_permission_map = {ctx.user_id: OWNER_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + project_id_to_permission_map, + domain_name_to_permission_map, + object_id_to_additional_permission_map=object_id_to_additional_permission_map, + ) + else: + # Only superadmin can access to another domains + return PermissionContext() + case UserRole.USER: + if ctx.domain_name == domain_name: + project_ctx = await ctx.get_accessible_projects_in_domain( + self.db_session, domain_name + ) + if project_ctx is not None: + project_id_to_permission_map = { + project_id: ADMIN_PERMISSIONS_ON_PROJECT_FOLDERS + if role == UserRoleInProject.ADMIN + else USER_PERMISSIONS_ON_PROJECT_FOLDERS + for project_id, role in project_ctx.items() + } + else: + project_id_to_permission_map = {} + overriding_stmt = ( + sa.select(VFolderPermissionRow) + .select_from(sa.join(VFolderPermissionRow, VFolderRow)) + .where( + (VFolderPermissionRow.user == ctx.user_id) + & (VFolderRow.domain_name == domain_name) + ) + ) + object_id_to_overriding_permission_map = { + row.vfolder: PERMISSION_TO_RBAC_PERMISSION_MAP[row.permission] + for row in await self.db_session.scalars(overriding_stmt) + } + + user_id_to_permission_map = {ctx.user_id: OWNER_PERMISSIONS} + return PermissionContext( + user_id_to_permission_map, + project_id_to_permission_map, + object_id_to_overriding_permission_map=object_id_to_overriding_permission_map, + ) + else: + # Only superadmin can access to another domains + return 
PermissionContext() + + +class VFolderWithPermissionSet(NamedTuple): + vfolder_row: VFolderRow + permissions: frozenset[VFolderRBACPermission] + + +async def get_vfolders( + db_conn: SAConnection, + ctx: ClientContext, + target_scope: BaseScope, + extra_scope: StorageHost | None = None, + requested_permission: VFolderRBACPermission | None = None, + *, + vfolder_id: uuid.UUID | None = None, + vfolder_name: str | None = None, + usage_mode: VFolderUsageMode | None = None, + allowed_status: Container[VFolderOperationStatus] | None = None, + blocked_status: Container[VFolderOperationStatus] | None = None, +) -> list[VFolderWithPermissionSet]: + async with ctx.db.begin_readonly_session(db_conn) as db_session: + ctx_builder = PermissionContextBuilder(db_session) + permission_ctx = await ctx_builder.build(ctx, target_scope, permission=requested_permission) + query_stmt = await permission_ctx.build_query() + if query_stmt is None: + return [] + if vfolder_id is not None: + query_stmt = query_stmt.where(VFolderRow.id == vfolder_id) + if vfolder_name is not None: + query_stmt = query_stmt.where(VFolderRow.name == vfolder_name) + if usage_mode is not None: + query_stmt = query_stmt.where(VFolderRow.usage_mode == usage_mode) + if allowed_status is not None: + query_stmt = query_stmt.where(VFolderRow.status.in_(allowed_status)) + if blocked_status is not None: + query_stmt = query_stmt.where(VFolderRow.status.not_in(blocked_status)) + + result: list[VFolderWithPermissionSet] = [] + for row in await db_session.scalars(query_stmt): + row = cast(VFolderRow, row) + permissions = await permission_ctx.calculate_final_permission(row) + result.append(VFolderWithPermissionSet(row, permissions)) + return result + + +async def validate_permission( + db_conn: SAConnection, + ctx: ClientContext, + target_scope: BaseScope, + extra_scope: StorageHost | None = None, + *, + permission: VFolderRBACPermission, + vfolder_id: uuid.UUID, +) -> None: + async with ctx.db.begin_readonly_session(db_conn) as db_session: + ctx_builder = PermissionContextBuilder(db_session) + permission_ctx = await ctx_builder.build(ctx, target_scope, permission=permission) + query_stmt = await permission_ctx.build_query() + if query_stmt is None: + raise NotEnoughPermission(f"'{permission.name}' not allowed in {str(target_scope)}") + query_stmt = query_stmt.where(VFolderRow.id == vfolder_id) + vfolder_row = cast(VFolderRow | None, await db_session.scalar(query_stmt)) + if vfolder_row is None: + raise VFolderNotFound( + f"VFolder not found (id:{vfolder_id}, permission:{permission.name})" + ) + final_perms = await permission_ctx.calculate_final_permission(vfolder_row) + if permission not in final_perms: + raise NotEnoughPermission(f"'{permission.name}' not allowed in {str(target_scope)}") + + async def get_allowed_vfolder_hosts_by_group( conn: SAConnection, resource_policy, @@ -1222,7 +1754,6 @@ async def ensure_quota_scope_accessible_by_user( quota_scope: QuotaScopeID, user: Mapping[str, Any], ) -> None: - from ai.backend.manager.models import GroupRow, UserRow from ai.backend.manager.models import association_groups_users as agus # Lookup user table to match if quota is scoped to the user @@ -1297,6 +1828,7 @@ class Meta: group = graphene.UUID() # Group.id (current owner, null in user vfolders) group_name = graphene.String() # Group.name (current owenr, null in user vfolders) creator = graphene.String() # User.email (always set) + domain_name = graphene.String(description="Added in 24.09.0.") unmanaged_path = graphene.String() usage_mode = 
graphene.String() permission = graphene.String() @@ -1333,6 +1865,7 @@ def _get_field(name: str) -> Any: group=row["group"], group_name=_get_field("groups_name"), creator=row["creator"], + domain_name=row["domain_name"], unmanaged_path=row["unmanaged_path"], usage_mode=row["usage_mode"], permission=row["permission"], @@ -1347,6 +1880,31 @@ def _get_field(name: str) -> Any: cur_size=row["cur_size"], ) + @classmethod + def from_orm_row(cls, row: VFolderRow) -> VirtualFolder: + return cls( + id=row.id, + host=row.host, + quota_scope_id=row.quota_scope_id, + name=row.name, + user=row.user, + user_email=row.user_row.email if row.user_row is not None else None, + group=row.group, + group_name=row.group_row.name if row.group_row is not None else None, + creator=row.creator, + unmanaged_path=row.unmanaged_path, + usage_mode=row.usage_mode, + permission=row.permission, + ownership_type=row.ownership_type, + max_files=row.max_files, + max_size=row.max_size, + created_at=row.created_at, + last_used=row.last_used, + cloneable=row.cloneable, + status=row.status, + cur_size=row.cur_size, + ) + async def resolve_num_files(self, info: graphene.ResolveInfo) -> int: # TODO: measure on-the-fly return 0 @@ -1361,6 +1919,7 @@ async def resolve_num_files(self, info: graphene.ResolveInfo) -> int: "user": ("vfolders_user", uuid.UUID), "user_email": ("users_email", None), "creator": ("vfolders_creator", None), + "domain_name": ("vfolders_domain_name", None), "unmanaged_path": ("vfolders_unmanaged_path", None), "usage_mode": ( "vfolders_usage_mode", @@ -1392,6 +1951,7 @@ async def resolve_num_files(self, info: graphene.ResolveInfo) -> int: "name": ("vfolders_name", None), "group": ("vfolders_group", None), "group_name": ("groups_name", None), + "domain_name": ("domain_name", None), "user": ("vfolders_user", None), "user_email": ("users_email", None), "creator": ("vfolders_creator", None), @@ -1912,6 +2472,10 @@ async def get_connection( last=last, ) + query = query.options( + joinedload(VFolderRow.user_row), + joinedload(VFolderRow.group_row), + ) async with graph_ctx.db.begin_readonly_session() as db_session: vfolder_rows = (await db_session.scalars(query)).all() result = [(cls.from_row(info, vf)) for vf in vfolder_rows] @@ -2059,8 +2623,6 @@ def resolve_id(self, info: graphene.ResolveInfo) -> str: return f"QuotaScope:{self.storage_host_name}/{self.quota_scope_id}" async def resolve_details(self, info: graphene.ResolveInfo) -> Optional[int]: - from ai.backend.manager.models import GroupRow, UserRow - graph_ctx: GraphQueryContext = info.context proxy_name, volume_name = graph_ctx.storage_manager.split_host(self.storage_host_name) try: @@ -2222,6 +2784,7 @@ class Meta: name = graphene.String() vfolder = graphene.Field(VirtualFolder) + vfolder_node = graphene.Field(VirtualFolderNode, description="Added in 24.09.0.") author = graphene.String() title = graphene.String(description="Human readable name of the model.") version = graphene.String() @@ -2304,7 +2867,7 @@ def resolve_created_at( ) -> datetime: try: return dtparse(self.created_at) - except ParserError: + except (TypeError, ParserError): return self.created_at def resolve_modified_at( @@ -2313,7 +2876,7 @@ def resolve_modified_at( ) -> datetime: try: return dtparse(self.modified_at) - except ParserError: + except (TypeError, ParserError): return self.modified_at @classmethod @@ -2338,6 +2901,8 @@ def parse_model( name = vfolder_row.name return cls( id=vfolder_row.id, + vfolder=VirtualFolder.from_orm_row(vfolder_row), + 
vfolder_node=VirtualFolderNode.from_row(resolve_info, vfolder_row), name=name, author=metadata.get("author") or vfolder_row.creator or "", title=metadata.get("title") or vfolder_row.name, @@ -2451,7 +3016,9 @@ async def get_node(cls, info: graphene.ResolveInfo, id: str) -> ModelCard: _, vfolder_row_id = AsyncNode.resolve_global_id(info, id) async with graph_ctx.db.begin_readonly_session() as db_session: - vfolder_row = await VFolderRow.get(db_session, uuid.UUID(vfolder_row_id)) + vfolder_row = await VFolderRow.get( + db_session, uuid.UUID(vfolder_row_id), load_user=True, load_group=True + ) if vfolder_row.usage_mode != VFolderUsageMode.MODEL: raise ValueError( f"The vfolder is not model. expect: {VFolderUsageMode.MODEL.value}, got:" @@ -2522,7 +3089,10 @@ async def get_connection( VFolderRow.group.in_(model_store_project_gids) ) query = query.where(additional_cond) - cnt_query = cnt_query.where(additional_cond) + query = query.options( + joinedload(VFolderRow.user_row), + joinedload(VFolderRow.group_row), + ) async with graph_ctx.db.begin_readonly_session() as db_session: vfolder_rows = (await db_session.scalars(query)).all() result = [(await cls.from_row(info, vf)) for vf in vfolder_rows] diff --git a/src/ai/backend/manager/registry.py b/src/ai/backend/manager/registry.py index 456959a5dc8..df48dbf3c01 100644 --- a/src/ai/backend/manager/registry.py +++ b/src/ai/backend/manager/registry.py @@ -20,6 +20,7 @@ Any, Dict, List, + Literal, Mapping, MutableMapping, Optional, @@ -45,7 +46,7 @@ from redis.asyncio import Redis from sqlalchemy.exc import DBAPIError from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import load_only, noload, selectinload +from sqlalchemy.orm import load_only, noload, selectinload, with_loader_criteria from sqlalchemy.orm.exc import NoResultFound from yarl import URL @@ -150,6 +151,7 @@ SessionDependencyRow, SessionRow, SessionStatus, + UserRole, UserRow, agents, domains, @@ -164,6 +166,12 @@ scaling_groups, verify_vfolder_name, ) +from .models.session import ( + COMPUTE_CONCURRENCY_USED_KEY_PREFIX, + SESSION_KERNEL_STATUS_MAPPING, + SYSTEM_CONCURRENCY_USED_KEY_PREFIX, + ConcurrencyUsed, +) from .models.utils import ( ExtendedAsyncSAEngine, execute_with_retry, @@ -1618,11 +1626,23 @@ async def _update_session_occupying_slots(db_session: AsyncSession) -> None: SessionRow.name, SessionRow.creation_id, SessionRow.access_key, + SessionRow.session_type, ), + selectinload( + SessionRow.kernels, + ).options( + load_only( + KernelRow.id, + KernelRow.agent, + KernelRow.cluster_role, + KernelRow.startup_command, + ) + ), + with_loader_criteria(KernelRow, KernelRow.cluster_role == DEFAULT_ROLE), ) ) async with self.db.begin_readonly_session() as db_session: - updated_session = (await db_session.scalars(query)).first() + updated_session = cast(SessionRow, await db_session.scalar(query)) log.debug( "Producing SessionStartedEvent({}, {})", @@ -1640,6 +1660,9 @@ async def _update_session_occupying_slots(db_session: AsyncSession) -> None: updated_session.access_key, ), ) + + if updated_session.session_type == SessionTypes.BATCH: + await self.trigger_batch_execution(updated_session) except Exception: log.exception("error while executing _finalize_running") raise @@ -1957,17 +1980,11 @@ async def _update_agent_resource() -> None: await execute_with_retry(_update_agent_resource) async def recalc_resource_usage(self, do_fullscan: bool = False) -> None: - concurrency_used_per_key: MutableMapping[str, set] = defaultdict( - set - ) # key: access_key, value: set of 
session_id - sftp_concurrency_used_per_key: MutableMapping[str, set] = defaultdict( - set - ) # key: access_key, value: set of session_id - - async def _recalc() -> None: + async def _recalc() -> Mapping[AccessKey, ConcurrencyUsed]: occupied_slots_per_agent: MutableMapping[str, ResourceSlot] = defaultdict( lambda: ResourceSlot({"cpu": 0, "mem": 0}) ) + access_key_to_concurrency_used: dict[AccessKey, ConcurrencyUsed] = {} async with self.db.begin_session() as db_sess: # Query running containers and calculate concurrency_used per AK and @@ -1998,12 +2015,19 @@ async def _recalc() -> None: kernel.occupied_slots ) if session_status in USER_RESOURCE_OCCUPYING_SESSION_STATUSES: + access_key = cast(AccessKey, session_row.access_key) + if access_key not in access_key_to_concurrency_used: + access_key_to_concurrency_used[access_key] = ConcurrencyUsed( + access_key + ) if kernel.role in PRIVATE_KERNEL_ROLES: - sftp_concurrency_used_per_key[session_row.access_key].add( + access_key_to_concurrency_used[access_key].system_session_ids.add( session_row.id ) else: - concurrency_used_per_key[session_row.access_key].add(session_row.id) + access_key_to_concurrency_used[access_key].compute_session_ids.add( + session_row.id + ) if len(occupied_slots_per_agent) > 0: # Update occupied_slots for agents with running containers. @@ -2033,54 +2057,54 @@ async def _recalc() -> None: .where(AgentRow.status == AgentStatus.ALIVE) ) await db_sess.execute(query) + return access_key_to_concurrency_used - await execute_with_retry(_recalc) + access_key_to_concurrency_used = await execute_with_retry(_recalc) # Update keypair resource usage for keypairs with running containers. - kp_key = "keypair.concurrency_used" - sftp_kp_key = "keypair.sftp_concurrency_used" - async def _update(r: Redis): - updates = { - f"{kp_key}.{ak}": len(session_ids) - for ak, session_ids in concurrency_used_per_key.items() - } | { - f"{sftp_kp_key}.{ak}": len(session_ids) - for ak, session_ids in sftp_concurrency_used_per_key.items() - } + updates: dict[str, int] = {} + for concurrency in access_key_to_concurrency_used.values(): + updates |= concurrency.to_cnt_map() if updates: await r.mset(typing.cast(MSetType, updates)) async def _update_by_fullscan(r: Redis): updates = {} - keys = await r.keys(f"{kp_key}.*") + keys = await r.keys(f"{COMPUTE_CONCURRENCY_USED_KEY_PREFIX}*") for stat_key in keys: if isinstance(stat_key, bytes): _stat_key = stat_key.decode("utf-8") else: - _stat_key = stat_key - ak = _stat_key.replace(f"{kp_key}.", "") - session_concurrency = concurrency_used_per_key.get(ak) - usage = len(session_concurrency) if session_concurrency is not None else 0 + _stat_key = cast(str, stat_key) + ak = _stat_key.replace(COMPUTE_CONCURRENCY_USED_KEY_PREFIX, "") + concurrent_sessions = access_key_to_concurrency_used.get(AccessKey(ak)) + usage = ( + len(concurrent_sessions.compute_session_ids) + if concurrent_sessions is not None + else 0 + ) updates[_stat_key] = usage - keys = await r.keys(f"{sftp_kp_key}.*") + keys = await r.keys(f"{SYSTEM_CONCURRENCY_USED_KEY_PREFIX}*") for stat_key in keys: if isinstance(stat_key, bytes): _stat_key = stat_key.decode("utf-8") else: - _stat_key = stat_key - ak = _stat_key.replace(f"{sftp_kp_key}.", "") - session_concurrency = sftp_concurrency_used_per_key.get(ak) - usage = len(session_concurrency) if session_concurrency is not None else 0 + _stat_key = cast(str, stat_key) + ak = _stat_key.replace(SYSTEM_CONCURRENCY_USED_KEY_PREFIX, "") + concurrent_sessions = access_key_to_concurrency_used.get(AccessKey(ak)) + 
usage = ( + len(concurrent_sessions.system_session_ids) + if concurrent_sessions is not None + else 0 + ) updates[_stat_key] = usage if updates: await r.mset(typing.cast(MSetType, updates)) # Do full scan if the entire system does not have ANY sessions/sftp-sessions # to set all concurrency_used to 0 - _do_fullscan = do_fullscan or ( - not concurrency_used_per_key and not sftp_concurrency_used_per_key - ) + _do_fullscan = do_fullscan or not access_key_to_concurrency_used if _do_fullscan: await redis_helper.execute( self.redis_stat, @@ -2138,6 +2162,7 @@ async def destroy_session( *, forced: bool = False, reason: Optional[KernelLifecycleEventReason] = None, + user_role: UserRole | None = None, ) -> Mapping[str, Any]: """ Destroy session kernels. Do not destroy @@ -2162,6 +2187,50 @@ async def destroy_session( if hook_result.status != PASSED: raise RejectedByHook.from_hook_result(hook_result) + async def _force_destroy_for_suadmin( + target_status: Literal[SessionStatus.CANCELLED, SessionStatus.TERMINATED], + ) -> None: + current_time = datetime.now(tzutc()) + destroy_reason = str(KernelLifecycleEventReason.FORCE_TERMINATED) + + async def _destroy(db_session: AsyncSession) -> SessionRow: + _stmt = ( + sa.select(SessionRow) + .where(SessionRow.id == session_id) + .options(selectinload(SessionRow.kernels)) + ) + session_row = cast(SessionRow | None, await db_session.scalar(_stmt)) + if session_row is None: + raise SessionNotFound(f"Session not found (id: {session_id})") + kernel_rows = cast(list[KernelRow], session_row.kernels) + kernel_target_status = SESSION_KERNEL_STATUS_MAPPING[target_status] + for kern in kernel_rows: + kern.status = kernel_target_status + kern.terminated_at = current_time + kern.status_info = destroy_reason + kern.status_history = sql_json_merge( + KernelRow.status_history, + (), + { + kernel_target_status.name: current_time.isoformat(), + }, + ) + session_row.status = target_status + session_row.terminated_at = current_time + session_row.status_info = destroy_reason + session_row.status_history = sql_json_merge( + SessionRow.status_history, + (), + { + target_status.name: current_time.isoformat(), + }, + ) + return session_row + + async with self.db.connect() as db_conn: + await execute_with_txn_retry(_destroy, self.db.begin_session, db_conn) + await self.recalc_resource_usage() + async with handle_session_exception( self.db, "destroy_session", @@ -2200,6 +2269,17 @@ self.db, session_id, SessionStatus.CANCELLED ) case SessionStatus.PULLING: + # Exceptionally allow superadmins to destroy PULLING sessions. + # Clients should be informed that they have to handle the containers destroyed here. + # TODO: detach image-pull process from kernel-start process and allow all users to destroy PULLING sessions. + if forced and user_role == UserRole.SUPERADMIN: + log.warning( + "force-terminating session (s:{}, status:{})", + session_id, + target_session.status, + ) + await _force_destroy_for_suadmin(SessionStatus.CANCELLED) + return {} raise GenericForbidden("Cannot destroy sessions in pulling status") case ( SessionStatus.SCHEDULED @@ -2217,12 +2297,18 @@ session_id, target_session.status, ) - await SessionRow.set_session_status( - self.db, session_id, SessionStatus.TERMINATING - ) - await self.event_producer.produce_event( - SessionTerminatingEvent(session_id, reason), - ) + if user_role == UserRole.SUPERADMIN: + # Exceptionally let superadmins set the session status to 'TERMINATED' and finish the function. 
+ # TODO: refactor Session/Kernel status management and remove this. + await _force_destroy_for_suadmin(SessionStatus.TERMINATED) + return {} + else: + await SessionRow.set_session_status( + self.db, session_id, SessionStatus.TERMINATING + ) + await self.event_producer.produce_event( + SessionTerminatingEvent(session_id, reason), + ) case SessionStatus.TERMINATED: raise GenericForbidden( "Cannot destroy sessions that has already been already terminated" @@ -2605,6 +2691,9 @@ async def _restart_kernel(kernel: KernelRow) -> None: SessionStartedEvent(session.id, session.creation_id), ) + if session.session_type == SessionTypes.BATCH: + await self.trigger_batch_execution(session) + async def execute( self, session: SessionRow, @@ -2638,6 +2727,22 @@ async def execute( flush_timeout, ) + async def trigger_batch_execution( + self, + session: SessionRow, + ) -> None: + async with handle_session_exception(self.db, "trigger_batch_execution", session.id): + async with self.agent_cache.rpc_context( + session.main_kernel.agent, + invoke_timeout=30, + order_key=session.main_kernel.id, + ) as rpc: + return await rpc.call.trigger_batch_execution( + str(session.id), + str(session.main_kernel.id), + session.main_kernel.startup_command or "", + ) + async def interrupt_session( self, session: SessionRow, @@ -2739,14 +2844,20 @@ async def list_files( async def get_logs_from_agent( self, session: SessionRow, + kernel_id: KernelId | None = None, ) -> str: async with handle_session_exception(self.db, "get_logs_from_agent", session.id): + kernel = ( + session.get_kernel_by_id(kernel_id) + if kernel_id is not None + else session.main_kernel + ) async with self.agent_cache.rpc_context( - session.main_kernel.agent, + agent_id=kernel.agent, invoke_timeout=30, - order_key=session.main_kernel.id, + order_key=kernel.id, ) as rpc: - reply = await rpc.call.get_logs(str(session.main_kernel.id)) + reply = await rpc.call.get_logs(str(kernel.id)) return reply["logs"] async def increment_session_usage( @@ -3280,10 +3391,15 @@ async def commit_session_to_file( img_path, _, image_name = filtered.partition("/") filename = f"{now}_{shortend_sname}_{image_name}.tar.gz" filename = filename.replace(":", "-") + image_ref = ImageRef(kernel.image, [registry], kernel.architecture) async with handle_session_exception(self.db, "commit_session_to_file", session.id): async with self.agent_cache.rpc_context(kernel.agent, order_key=kernel.id) as rpc: resp: Mapping[str, Any] = await rpc.call.commit( - str(kernel.id), email, filename=filename, extra_labels=extra_labels + str(kernel.id), + email, + filename=filename, + extra_labels=extra_labels, + canonical=image_ref.canonical, ) return resp diff --git a/src/ai/backend/plugin/BUILD b/src/ai/backend/plugin/BUILD index 0f476742658..14d7f124008 100644 --- a/src/ai/backend/plugin/BUILD +++ b/src/ai/backend/plugin/BUILD @@ -32,6 +32,11 @@ python_distribution( description="Backend.AI Plugin Subsystem", license="MIT", ), + entry_points={ + "backendai_cli_v10": { + "plugin": "ai.backend.plugin.cli:main", + }, + }, generate_setup=True, tags=["wheel"], ) diff --git a/src/ai/backend/plugin/cli.py b/src/ai/backend/plugin/cli.py new file mode 100644 index 00000000000..63e3dc04a57 --- /dev/null +++ b/src/ai/backend/plugin/cli.py @@ -0,0 +1,89 @@ +import enum +import itertools +import json +from collections import defaultdict + +import click +import colorama +import tabulate +from colorama import Fore, Style + +from .entrypoint import ( + scan_entrypoint_from_buildscript, + 
scan_entrypoint_from_package_metadata, + scan_entrypoint_from_plugin_checkouts, +) + + +class FormatOptions(enum.StrEnum): + CONSOLE = "console" + JSON = "json" + + +@click.group() +def main(): + """The root entrypoint for unified CLI of the plugin subsystem""" + pass + + +@main.command() +@click.argument("group_name") +@click.option( + "--format", + type=click.Choice([*FormatOptions]), + default=FormatOptions.CONSOLE, + show_default=True, + help="Set the output format.", +) +def scan(group_name: str, format: FormatOptions) -> None: + duplicate_count: dict[str, int] = defaultdict(int) + rows = [] + for source, entrypoint in itertools.chain( + (("buildscript", item) for item in scan_entrypoint_from_buildscript(group_name)), + (("plugin-checkout", item) for item in scan_entrypoint_from_plugin_checkouts(group_name)), + (("python-package", item) for item in scan_entrypoint_from_package_metadata(group_name)), + ): + duplicate_count[entrypoint.name] += 1 + rows.append((source, entrypoint.name, entrypoint.module)) + rows.sort(key=lambda row: (row[2], row[1], row[0])) + match format: + case FormatOptions.CONSOLE: + if not rows: + print(f"No plugins found for the entrypoint {group_name!r}") + return + colorama.init(autoreset=True) + ITALIC = colorama.ansi.code_to_chars(3) + src_style = { + "buildscript": Fore.LIGHTYELLOW_EX, + "plugin-checkout": Fore.LIGHTGREEN_EX, + "python-package": Fore.LIGHTBLUE_EX, + } + display_headers = ( + f"{ITALIC}Source{Style.RESET_ALL}", + f"{ITALIC}Name{Style.RESET_ALL}", + f"{ITALIC}Module Path{Style.RESET_ALL}", + ) + display_rows = [] + has_duplicate = False + for source, name, module_path in rows: + name_style = Style.BRIGHT + if duplicate_count[name] > 1: + has_duplicate = True + name_style = Fore.RED + Style.BRIGHT + display_rows.append(( + f"{src_style[source]}{source}{Style.RESET_ALL}", + f"{name_style}{name}{Style.RESET_ALL}", + module_path, + )) + print(tabulate.tabulate(display_rows, display_headers)) + if has_duplicate: + print(f"\n💥 {Fore.LIGHTRED_EX}Detected duplicated entrypoint(s)!{Style.RESET_ALL}") + case FormatOptions.JSON: + output_rows = [] + for source, name, module_path in rows: + output_rows.append({ + "source": source, + "name": name, + "module_path": module_path, + }) + print(json.dumps(output_rows, indent=2)) diff --git a/src/ai/backend/storage/api/manager.py b/src/ai/backend/storage/api/manager.py index 0cbd9dfc5f9..0746b2c38fd 100644 --- a/src/ai/backend/storage/api/manager.py +++ b/src/ai/backend/storage/api/manager.py @@ -283,13 +283,16 @@ class Params(TypedDict): quota_usage = await volume.quota_model.describe_quota_scope(params["qsid"]) if not quota_usage: await volume.quota_model.create_quota_scope(params["qsid"], params["options"]) - try: - await volume.quota_model.update_quota_scope(params["qsid"], params["options"]) - except InvalidQuotaConfig: - return web.json_response( - {"msg": "Invalid quota config option"}, - status=400, - ) + else: + try: + await volume.quota_model.update_quota_scope( + params["qsid"], params["options"] + ) + except InvalidQuotaConfig: + return web.json_response( + {"msg": "Invalid quota config option"}, + status=400, + ) return web.Response(status=204) diff --git a/src/ai/backend/storage/ddn/__init__.py b/src/ai/backend/storage/ddn/__init__.py index 3e73e8d7c18..640625c4546 100644 --- a/src/ai/backend/storage/ddn/__init__.py +++ b/src/ai/backend/storage/ddn/__init__.py @@ -141,9 +141,6 @@ async def create_quota_scope( if quota_usage is not None: raise QuotaScopeAlreadyExists - if options is None: - return - 
# Set projectID to the directory try: await run([ @@ -159,7 +156,8 @@ async def create_quota_scope( except CalledProcessError as e: raise RuntimeError(f"'lfs project -p {project_id}' command failed: {e.stderr}") - await self._set_quota_by_project(project_id, qspath, options) + if options is not None: + await self._set_quota_by_project(project_id, qspath, options) async def describe_quota_scope(self, quota_scope_id: QuotaScopeID) -> QuotaUsage | None: """ diff --git a/src/ai/backend/storage/purestorage/__init__.py b/src/ai/backend/storage/purestorage/__init__.py index 0e64056b4ae..09e9041fca1 100644 --- a/src/ai/backend/storage/purestorage/__init__.py +++ b/src/ai/backend/storage/purestorage/__init__.py @@ -2,170 +2,50 @@ import asyncio import contextlib -import json -import os -from pathlib import Path -from subprocess import CalledProcessError -from typing import AsyncIterator, FrozenSet +import logging +import re +from typing import FrozenSet -from ai.backend.common.types import BinarySize, HardwareMetadata +from ai.backend.common.logging_utils import BraceStyleAdapter +from ai.backend.common.types import HardwareMetadata from ..abc import CAP_FAST_FS_SIZE, CAP_FAST_SCAN, CAP_METRIC, CAP_VFOLDER, AbstractFSOpModel -from ..subproc import run -from ..types import CapacityUsage, DirEntry, DirEntryType, FSPerfMetric, Stat, TreeUsage -from ..utils import fstime2datetime -from ..vfs import BaseFSOpModel, BaseVolume +from ..types import CapacityUsage, FSPerfMetric +from ..vfs import BaseVolume from .purity import PurityClient +from .rapidfiles import RapidFileToolsFSOpModel +from .rapidfiles_v2 import RapidFileToolsv2FSOpModel +FLASHBLADE_TOOLKIT_V2_VERSION_RE = re.compile(r"version p[a-zA-Z\d]+ \(RapidFile\) (2\..+)") +FLASHBLADE_TOOLKIT_V1_VERSION_RE = re.compile(r"p[a-zA-Z\d]+ \(RapidFile Toolkit\) (1\..+)") -class RapidFileToolsFSOpModel(BaseFSOpModel): - async def copy_tree( - self, - src_path: Path, - dst_path: Path, - ) -> None: - extra_opts: list[bytes] = [] - if src_path.is_dir(): - extra_opts.append(b"-r") - try: - await run([ - b"pcp", - *extra_opts, - b"-p", - # os.fsencode(src_path / "."), # TODO: check if "/." is necessary? 
- os.fsencode(src_path), - os.fsencode(dst_path), - ]) - except CalledProcessError as e: - raise RuntimeError(f'"pcp" command failed: {e.stderr}') - - async def delete_tree( - self, - path: Path, - ) -> None: - try: - await run([ - b"prm", - b"-r", - os.fsencode(path), - ]) - except CalledProcessError as e: - raise RuntimeError(f"'prm' command failed: {e.stderr}") - - def scan_tree( - self, - path: Path, - *, - recursive: bool = True, - ) -> AsyncIterator[DirEntry]: - raw_target_path = os.fsencode(path) - - async def _aiter() -> AsyncIterator[DirEntry]: - proc = await asyncio.create_subprocess_exec( - b"pls", - b"--json", - raw_target_path, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - assert proc.stdout is not None - try: - while True: - line = await proc.stdout.readline() - if not line: - break - line = line.rstrip(b"\n") - item = json.loads(line) - item_path = Path(item["path"]) - entry_type = DirEntryType.FILE - if item["filetype"] == 40000: - entry_type = DirEntryType.DIRECTORY - if item["filetype"] == 120000: - entry_type = DirEntryType.SYMLINK - yield DirEntry( - name=item_path.name, - path=item_path, - type=entry_type, - stat=Stat( - size=item["size"], - owner=str(item["uid"]), - # The integer represents the octal number in decimal - # (e.g., 644 which actually means 0o644) - mode=int(str(item["mode"]), 8), - modified=fstime2datetime(item["mtime"]), - created=fstime2datetime(item["ctime"]), - ), - symlink_target="", # TODO: should be tested on PureStorage - ) - finally: - await proc.wait() - - return _aiter() - - async def scan_tree_usage( - self, - path: Path, - ) -> TreeUsage: - total_size = 0 - total_count = 0 - raw_target_path = os.fsencode(path) - # Measure the exact file sizes and bytes - proc = await asyncio.create_subprocess_exec( - b"pdu", - b"-0", - b"-b", - b"-a", - b"-s", - raw_target_path, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - assert proc.stdout is not None - try: - # TODO: check slowdowns when there are millions of files - while True: - try: - line = await proc.stdout.readuntil(b"\0") - line = line.rstrip(b"\0") - except asyncio.IncompleteReadError: - break - size, name = line.split(maxsplit=1) - if len(name) != len(raw_target_path) and name != raw_target_path: - total_size += int(size) - total_count += 1 - finally: - await proc.wait() - return TreeUsage(file_count=total_count, used_bytes=total_size) - - async def scan_tree_size( - self, - path: Path, - ) -> BinarySize: - proc = await asyncio.create_subprocess_exec( - b"pdu", - b"-hs", - bytes(path), - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - stdout, stderr = await proc.communicate() - if proc.returncode != 0: - raise RuntimeError(f"pdu command failed: {stderr.decode()}") - used_bytes, _ = stdout.decode().split() - return BinarySize.finite_from_str(used_bytes) +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] class FlashBladeVolume(BaseVolume): name = "purestorage" + _toolkit_version: int | None + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self._toolkit_version = None async def create_fsop_model(self) -> AbstractFSOpModel: - return RapidFileToolsFSOpModel( - self.mount_path, - self.local_config["storage-proxy"]["scandir-limit"], - ) + if (await self.get_toolkit_version()) == 2: + return RapidFileToolsv2FSOpModel( + self.mount_path, + self.local_config["storage-proxy"]["scandir-limit"], + ) + else: + return RapidFileToolsFSOpModel( + 
self.mount_path,
+                self.local_config["storage-proxy"]["scandir-limit"],
+            )
 
-    async def init(self) -> None:
-        available = True
+    async def get_toolkit_version(self) -> int:
+        if self._toolkit_version is not None:
+            return self._toolkit_version
         try:
             proc = await asyncio.create_subprocess_exec(
                 b"pdu",
@@ -174,15 +54,31 @@ async def init(self) -> None:
                 stderr=asyncio.subprocess.STDOUT,
             )
         except FileNotFoundError:
-            available = False
-        else:
-            try:
-                stdout, stderr = await proc.communicate()
-                if b"RapidFile Toolkit" not in stdout or proc.returncode != 0:
-                    available = False
-            finally:
-                await proc.wait()
-        if not available:
+            self._toolkit_version = -1
+            return -1
+        try:
+            stdout, stderr = await proc.communicate()
+            if proc.returncode != 0:
+                self._toolkit_version = -1
+            else:
+                version_line = stdout.decode().splitlines()[0]
+                if FLASHBLADE_TOOLKIT_V2_VERSION_RE.match(version_line):
+                    self._toolkit_version = 2
+                    log.info("FlashBlade Toolkit 2 detected")
+                elif FLASHBLADE_TOOLKIT_V1_VERSION_RE.match(version_line):
+                    self._toolkit_version = 1
+                    log.info("FlashBlade Toolkit 1 detected")
+                else:
+                    log.warn("Unrecognized FlashBlade Toolkit version: {}", version_line)
+                    self._toolkit_version = -1
+        finally:
+            await proc.wait()
+        assert self._toolkit_version
+        return self._toolkit_version
+
+    async def init(self) -> None:
+        toolkit_version = await self.get_toolkit_version()
+        if toolkit_version == -1:
             raise RuntimeError(
                 "PureStorage RapidFile Toolkit is not installed. "
                 "You cannot use the PureStorage backend for the storage proxy.",
diff --git a/src/ai/backend/storage/purestorage/rapidfiles.py b/src/ai/backend/storage/purestorage/rapidfiles.py
new file mode 100644
index 00000000000..9534eb8d3d4
--- /dev/null
+++ b/src/ai/backend/storage/purestorage/rapidfiles.py
@@ -0,0 +1,150 @@
+import asyncio
+import json
+import os
+from pathlib import Path
+from subprocess import CalledProcessError
+from typing import AsyncIterator
+
+from ai.backend.common.types import BinarySize
+
+from ..subproc import run
+from ..types import DirEntry, DirEntryType, Stat, TreeUsage
+from ..utils import fstime2datetime
+from ..vfs import BaseFSOpModel
+
+
+class RapidFileToolsFSOpModel(BaseFSOpModel):
+    async def copy_tree(
+        self,
+        src_path: Path,
+        dst_path: Path,
+    ) -> None:
+        extra_opts: list[bytes] = []
+        if src_path.is_dir():
+            extra_opts.append(b"-r")
+        try:
+            await run([
+                b"pcp",
+                *extra_opts,
+                b"-p",
+                # os.fsencode(src_path / "."),  # TODO: check if "/." is necessary?
+ os.fsencode(src_path), + os.fsencode(dst_path), + ]) + except CalledProcessError as e: + raise RuntimeError(f'"pcp" command failed: {e.stderr}') + + async def delete_tree( + self, + path: Path, + ) -> None: + try: + await run([ + b"prm", + b"-r", + os.fsencode(path), + ]) + except CalledProcessError as e: + raise RuntimeError(f"'prm' command failed: {e.stderr}") + + def scan_tree( + self, + path: Path, + *, + recursive: bool = True, + ) -> AsyncIterator[DirEntry]: + raw_target_path = os.fsencode(path) + + async def _aiter() -> AsyncIterator[DirEntry]: + proc = await asyncio.create_subprocess_exec( + b"pls", + b"--json", + raw_target_path, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + assert proc.stdout is not None + try: + while True: + line = await proc.stdout.readline() + if not line: + break + line = line.rstrip(b"\n") + item = json.loads(line) + item_path = Path(item["path"]) + entry_type = DirEntryType.FILE + if item["filetype"] == 40000: + entry_type = DirEntryType.DIRECTORY + if item["filetype"] == 120000: + entry_type = DirEntryType.SYMLINK + yield DirEntry( + name=item_path.name, + path=item_path, + type=entry_type, + stat=Stat( + size=item["size"], + owner=str(item["uid"]), + # The integer represents the octal number in decimal + # (e.g., 644 which actually means 0o644) + mode=int(str(item["mode"]), 8), + modified=fstime2datetime(item["mtime"]), + created=fstime2datetime(item["ctime"]), + ), + symlink_target="", # TODO: should be tested on PureStorage + ) + finally: + await proc.wait() + + return _aiter() + + async def scan_tree_usage( + self, + path: Path, + ) -> TreeUsage: + total_size = 0 + total_count = 0 + raw_target_path = os.fsencode(path) + # Measure the exact file sizes and bytes + proc = await asyncio.create_subprocess_exec( + b"pdu", + b"-0", + b"-b", + b"-a", + b"-s", + raw_target_path, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + assert proc.stdout is not None + try: + # TODO: check slowdowns when there are millions of files + while True: + try: + line = await proc.stdout.readuntil(b"\0") + line = line.rstrip(b"\0") + except asyncio.IncompleteReadError: + break + size, name = line.split(maxsplit=1) + if len(name) != len(raw_target_path) and name != raw_target_path: + total_size += int(size) + total_count += 1 + finally: + await proc.wait() + return TreeUsage(file_count=total_count, used_bytes=total_size) + + async def scan_tree_size( + self, + path: Path, + ) -> BinarySize: + proc = await asyncio.create_subprocess_exec( + b"pdu", + b"-hs", + bytes(path), + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + raise RuntimeError(f"pdu command failed: {stderr.decode()}") + used_bytes, _ = stdout.decode().split() + return BinarySize.finite_from_str(used_bytes) diff --git a/src/ai/backend/storage/purestorage/rapidfiles_v2.py b/src/ai/backend/storage/purestorage/rapidfiles_v2.py new file mode 100644 index 00000000000..f469480b626 --- /dev/null +++ b/src/ai/backend/storage/purestorage/rapidfiles_v2.py @@ -0,0 +1,118 @@ +import asyncio +import json +import os +from pathlib import Path +from subprocess import CalledProcessError +from typing import AsyncIterator + +from ai.backend.storage.utils import fstime2datetime + +from ..subproc import run +from ..types import DirEntry, DirEntryType, Stat, TreeUsage +from .rapidfiles import RapidFileToolsFSOpModel + + +class RapidFileToolsv2FSOpModel(RapidFileToolsFSOpModel): + async def 
copy_tree( + self, + src_path: Path, + dst_path: Path, + ) -> None: + extra_opts: list[bytes] = [] + if src_path.is_dir(): + extra_opts.append(b"-r") + if dst_path.is_dir(): + extra_opts.append(b"-T") + try: + await run([ # noqa: F821 + b"pcopy", + *extra_opts, + b"-p", + # os.fsencode(src_path / "."), # TODO: check if "/." is necessary? + os.fsencode(src_path), + os.fsencode(dst_path), + ]) + except CalledProcessError as e: + raise RuntimeError(f'"pcopy" command failed: {e.stderr}') + + def scan_tree( + self, + path: Path, + *, + recursive: bool = True, + ) -> AsyncIterator[DirEntry]: + raw_target_path = os.fsencode(path) + + async def _aiter() -> AsyncIterator[DirEntry]: + proc = await asyncio.create_subprocess_exec( + b"pls", + b"--jsonlines", + raw_target_path, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + assert proc.stdout is not None + try: + while True: + line = await proc.stdout.readline() + if not line: + break + line = line.rstrip(b"\n") + item = json.loads(line) + item_path = Path(item["path"]) + entry_type = DirEntryType.FILE + if item["filetype"] == 40000: + entry_type = DirEntryType.DIRECTORY + if item["filetype"] == 120000: + entry_type = DirEntryType.SYMLINK + yield DirEntry( + name=item_path.name, + path=item_path, + type=entry_type, + stat=Stat( + size=item["size"], + owner=str(item["uid"]), + mode=item["mode"], + modified=fstime2datetime(item["mtime"]), + created=fstime2datetime(item["ctime"]), + ), + symlink_target="", # TODO: should be tested on PureStorage + ) + finally: + await proc.wait() + + return _aiter() + + async def scan_tree_usage( + self, + path: Path, + ) -> TreeUsage: + total_size = 0 + total_count = 0 + raw_target_path = os.fsencode(path) + # Measure the exact file sizes and bytes + proc = await asyncio.create_subprocess_exec( + b"pdu", + b"-0", + b"-b", + b"-a", + raw_target_path, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + assert proc.stdout is not None + try: + # TODO: check slowdowns when there are millions of files + while True: + try: + line = await proc.stdout.readuntil(b"\0") + line = line.rstrip(b"\0") + except asyncio.IncompleteReadError: + break + size, name = line.split(maxsplit=1) + if len(name) != len(raw_target_path) and name != raw_target_path: + total_size += int(size) + total_count += 1 + finally: + await proc.wait() + return TreeUsage(file_count=total_count, used_bytes=total_size) diff --git a/src/ai/backend/testutils/bootstrap.py b/src/ai/backend/testutils/bootstrap.py index 329e0f37457..1c717baae2b 100644 --- a/src/ai/backend/testutils/bootstrap.py +++ b/src/ai/backend/testutils/bootstrap.py @@ -10,7 +10,7 @@ import subprocess import time from pathlib import Path -from typing import Iterator +from typing import Final, Iterator import pytest @@ -19,24 +19,25 @@ log = logging.getLogger(__spec__.name) # type: ignore[name-defined] +PORT_POOL_BASE: Final = int(os.environ.get("BACKEND_TEST_PORT_POOL_BASE", "10000")) +PORT_POOL_SIZE: Final = int(os.environ.get("BACKEND_TEST_PORT_POOL_SIZE", "1000")) -def get_free_port(): - with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.bind(("", 0)) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - return s.getsockname()[1] - -def check_if_port_is_clear(host, port): - while True: - try: - s = socket.create_connection((host, port), timeout=0.3) - except (ConnectionRefusedError, TimeoutError): - break +def get_next_tcp_port(num_alloc: int = 1) -> tuple[int, ...]: + lock_path = 
Path("~/.cache/bai/testing/port.lock").expanduser() + port_path = Path("~/.cache/bai/testing/port.txt").expanduser() + lock_path.parent.mkdir(parents=True, exist_ok=True) + with sync_file_lock(lock_path): + if port_path.exists(): + port_no = int(port_path.read_text()) else: - time.sleep(0.1) - s.close() - continue + port_no = PORT_POOL_BASE + allocated_ports = tuple( + PORT_POOL_BASE + (port_no + i) % PORT_POOL_SIZE for i in range(num_alloc) + ) + port_no = PORT_POOL_BASE + (port_no + num_alloc) % PORT_POOL_SIZE + port_path.write_text(str(port_no)) + return allocated_ports @contextlib.contextmanager @@ -91,9 +92,8 @@ def wait_health_check(container_id): @pytest.fixture(scope="session", autouse=False) def etcd_container() -> Iterator[tuple[str, HostPortPair]]: # Spawn a single-node etcd container for a testing session. - etcd_allocated_port = 9600 + get_parallel_slot() * 8 + 0 random_id = secrets.token_hex(8) - check_if_port_is_clear("127.0.0.1", etcd_allocated_port) + published_port = get_next_tcp_port()[0] proc = subprocess.run( [ "docker", @@ -104,9 +104,7 @@ def etcd_container() -> Iterator[tuple[str, HostPortPair]]: "--name", f"test--etcd-slot-{get_parallel_slot()}-{random_id}", "-p", - f"0.0.0.0:{etcd_allocated_port}:2379", - "-p", - "0.0.0.0::4001", + f"127.0.0.1:{published_port}:2379", "--health-cmd", "etcdctl endpoint health", "--health-interval", @@ -125,9 +123,9 @@ def etcd_container() -> Iterator[tuple[str, HostPortPair]]: container_id = proc.stdout.decode().strip() if not container_id: raise RuntimeError("etcd_container: failed to create container", proc.stderr.decode()) - log.info("spawning etcd container on port %d", etcd_allocated_port) + log.info("spawning etcd container (parallel slot: %d)", get_parallel_slot()) wait_health_check(container_id) - yield container_id, HostPortPair("127.0.0.1", etcd_allocated_port) + yield container_id, HostPortPair("127.0.0.1", published_port) subprocess.run( [ "docker", @@ -143,9 +141,8 @@ def etcd_container() -> Iterator[tuple[str, HostPortPair]]: @pytest.fixture(scope="session", autouse=False) def redis_container() -> Iterator[tuple[str, HostPortPair]]: # Spawn a single-node etcd container for a testing session. - redis_allocated_port = 9600 + get_parallel_slot() * 8 + 1 - check_if_port_is_clear("127.0.0.1", redis_allocated_port) random_id = secrets.token_hex(8) + published_port = get_next_tcp_port()[0] proc = subprocess.run( [ "docker", @@ -158,7 +155,7 @@ def redis_container() -> Iterator[tuple[str, HostPortPair]]: "--name", f"test--redis-slot-{get_parallel_slot()}-{random_id}", "-p", - f"0.0.0.0:{redis_allocated_port}:6379", + f"127.0.0.1:{published_port}:6379", # IMPORTANT: We have intentionally omitted the healthcheck here # to avoid intermittent failures when pausing/unpausing containers. 
"redis:7-alpine", @@ -168,11 +165,11 @@ def redis_container() -> Iterator[tuple[str, HostPortPair]]: container_id = proc.stdout.decode().strip() if not container_id: raise RuntimeError("redis_container: failed to create container", proc.stderr.decode()) - log.info("spawning redis container on port %d", redis_allocated_port) + log.info("spawning redis container (parallel slot: %d)", get_parallel_slot()) while True: try: with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.connect(("127.0.0.1", redis_allocated_port)) + s.connect(("127.0.0.1", published_port)) s.send(b"*2\r\n$4\r\nPING\r\n$5\r\nhello\r\n") reply = s.recv(128, 0) if not reply.startswith(b"$5\r\nhello\r\n"): @@ -183,7 +180,7 @@ def redis_container() -> Iterator[tuple[str, HostPortPair]]: time.sleep(0.1) continue time.sleep(0.5) - yield container_id, HostPortPair("127.0.0.1", redis_allocated_port) + yield container_id, HostPortPair("127.0.0.1", published_port) subprocess.run( [ "docker", @@ -199,9 +196,8 @@ def redis_container() -> Iterator[tuple[str, HostPortPair]]: @pytest.fixture(scope="session", autouse=False) def postgres_container() -> Iterator[tuple[str, HostPortPair]]: # Spawn a single-node etcd container for a testing session. - postgres_allocated_port = 9600 + get_parallel_slot() * 8 + 2 - check_if_port_is_clear("127.0.0.1", postgres_allocated_port) random_id = secrets.token_hex(8) + published_port = get_next_tcp_port()[0] proc = subprocess.run( [ "docker", @@ -212,7 +208,7 @@ def postgres_container() -> Iterator[tuple[str, HostPortPair]]: "--name", f"test--postgres-slot-{get_parallel_slot()}-{random_id}", "-p", - f"0.0.0.0:{postgres_allocated_port}:5432", + f"127.0.0.1:{published_port}:5432", "-e", "POSTGRES_PASSWORD=develove", "-e", @@ -230,9 +226,9 @@ def postgres_container() -> Iterator[tuple[str, HostPortPair]]: container_id = proc.stdout.decode().strip() if not container_id: raise RuntimeError("postgres_container: failed to create container", proc.stderr.decode()) - log.info("spawning postgres container on port %d", postgres_allocated_port) + log.info("spawning postgres container (parallel slot: %d)", get_parallel_slot()) wait_health_check(container_id) - yield container_id, HostPortPair("127.0.0.1", postgres_allocated_port) + yield container_id, HostPortPair("127.0.0.1", published_port) subprocess.run( [ "docker", diff --git a/src/ai/backend/web/README.md b/src/ai/backend/web/README.md index cc9b05bbdb2..8ba1c2927e9 100644 --- a/src/ai/backend/web/README.md +++ b/src/ai/backend/web/README.md @@ -40,12 +40,12 @@ into the `src/ai/backend/web/static` directory. 
To download and deploy web UI from pre-built source, do the following: ```console -git submodule init -git submodule update -cd src/ai/backend/web/static -git checkout main # or target branch -git fetch -git pull +cd src/ai/backend/web +curl --fail -sL https://github.com/lablup/backend.ai-webui/releases/download/v$TARGET_VERSION/backend.ai-webui-bundle-$TARGET_VERSION.zip > /tmp/bai-webui.zip +rm -rf static +mkdir static +cd static +unzip /tmp/bai-webui.zip ``` ### Setup configuration for webserver diff --git a/src/ai/backend/wsproxy/BUILD b/src/ai/backend/wsproxy/BUILD new file mode 100644 index 00000000000..885ab4e0e2f --- /dev/null +++ b/src/ai/backend/wsproxy/BUILD @@ -0,0 +1,85 @@ +python_sources( + name="src", + dependencies=[ + "src/ai/backend/wsproxy/cli:src", # not auto-inferred (due to lazy-loading cmdgroup) + "src/ai/backend/wsproxy/api:src", # not auto-inferred (due to lazy-loading cmdgroup) + ":resources", + ], +) + +visibility_private_component( + allowed_dependents=[], + allowed_dependencies=[ + "//src/ai/backend/cli/**", + "//src/ai/backend/common/**", + ], +) + +python_distribution( + name="dist", + dependencies=[ + ":src", + "!!stubs/trafaret:stubs", + ], + provides=python_artifact( + name="backend.ai-wsproxy", + description="Backend.AI WSProxy", + license="LGPLv3", + ), + entry_points={ + "backendai_cli_v10": { + "wsproxy": "ai.backend.wsproxy.cli.__main__:main", + "wsproxy.start-server": "ai.backend.wsproxy.server:main", + }, + }, + generate_setup=True, + tags=["wheel"], +) + +pex_binary( + name="pex", + entry_point="ai.backend.cli.__main__", + dependencies=[ + ":src", + ":buildscript", + "!!stubs/trafaret:stubs", + ], +) + +scie_binary( + name="backendai-wsproxy", + fat=False, + dependencies=[":pex"], + tags=["scie", "lazy"], +) + +scie_binary( + name="backendai-wsproxy-fat", + fat=True, + dependencies=[":pex"], + tags=["scie", "fat"], +) + +run_shell_command( + name="checksum", + command=( + "find . 
-name 'backendai-wsproxy-*' -not -name '*.sha256' | xargs -I {} sh -c" + " 'sha256sum {} > {}.sha256'" + ), + workdir="/dist", + execution_dependencies=[ + ":backendai-wsproxy", + ":backendai-wsproxy-fat", + ], + tags=["checksum"], +) + +resource(name="version", source="VERSION") +resource(name="buildscript", source="BUILD") +resources( + name="resources", + dependencies=[ + ":version", + ], + sources=["**/py.typed", "templates/**"], +) diff --git a/src/ai/backend/wsproxy/README.md b/src/ai/backend/wsproxy/README.md new file mode 100644 index 00000000000..9277c6c3d9a --- /dev/null +++ b/src/ai/backend/wsproxy/README.md @@ -0,0 +1,2 @@ +# Backend.AI WSProxy + diff --git a/src/ai/backend/wsproxy/VERSION b/src/ai/backend/wsproxy/VERSION new file mode 120000 index 00000000000..a4e948506b8 --- /dev/null +++ b/src/ai/backend/wsproxy/VERSION @@ -0,0 +1 @@ +../../../../VERSION \ No newline at end of file diff --git a/src/ai/backend/wsproxy/__init__.py b/src/ai/backend/wsproxy/__init__.py new file mode 100644 index 00000000000..67adea593b1 --- /dev/null +++ b/src/ai/backend/wsproxy/__init__.py @@ -0,0 +1,3 @@ +from pathlib import Path + +__version__ = (Path(__file__).parent / "VERSION").read_text().strip() diff --git a/src/ai/backend/wsproxy/api/BUILD b/src/ai/backend/wsproxy/api/BUILD new file mode 100644 index 00000000000..73574424040 --- /dev/null +++ b/src/ai/backend/wsproxy/api/BUILD @@ -0,0 +1 @@ +python_sources(name="src") diff --git a/src/ai/backend/wsproxy/api/circuit.py b/src/ai/backend/wsproxy/api/circuit.py new file mode 100644 index 00000000000..98f1059a644 --- /dev/null +++ b/src/ai/backend/wsproxy/api/circuit.py @@ -0,0 +1,55 @@ +from typing import Iterable +from uuid import UUID + +import aiohttp_cors +from aiohttp import web + +from ai.backend.wsproxy.types import ( + CORSOptions, + PydanticResponse, + WebMiddleware, +) + +from ..defs import RootContext +from ..exceptions import ObjectNotFound +from .types import StubResponseModel +from .utils import auth_required, pydantic_api_response_handler + + +@auth_required("worker") +@pydantic_api_response_handler +async def delete_circuit(request: web.Request) -> PydanticResponse[StubResponseModel]: + """ + Removes circuit record from wsproxy. 
+ """ + root_ctx: RootContext = request.app["_root.context"] + circuit_id = UUID(request.match_info["circuit_id"]) + + try: + circuit = root_ctx.proxy_frontend.circuits[circuit_id] + except KeyError: + raise ObjectNotFound(object_name="Circuit") + await root_ctx.proxy_frontend.break_circuit(circuit) + + return PydanticResponse(StubResponseModel(success=True)) + + +async def init(app: web.Application) -> None: + pass + + +async def shutdown(app: web.Application) -> None: + pass + + +def create_app( + default_cors_options: CORSOptions, +) -> tuple[web.Application, Iterable[WebMiddleware]]: + app = web.Application() + app["prefix"] = "api/circuit" + app.on_startup.append(init) + app.on_shutdown.append(shutdown) + cors = aiohttp_cors.setup(app, defaults=default_cors_options) + add_route = app.router.add_route + cors.add(add_route("DELETE", "/{circuit_id}", delete_circuit)) + return app, [] diff --git a/src/ai/backend/wsproxy/api/conf.py b/src/ai/backend/wsproxy/api/conf.py new file mode 100644 index 00000000000..7f9f14589be --- /dev/null +++ b/src/ai/backend/wsproxy/api/conf.py @@ -0,0 +1,70 @@ +from logging import LoggerAdapter +from typing import Iterable + +import aiohttp_cors +import jwt +from aiohttp import web +from pydantic import BaseModel + +from ..defs import RootContext +from ..types import CORSOptions, PydanticResponse, WebMiddleware +from ..utils import ensure_json_serializable +from .types import ConfRequestModel +from .utils import pydantic_api_handler + + +class TokenResponseModel(BaseModel): + token: str + + +@pydantic_api_handler(ConfRequestModel) +async def conf_v2( + request: web.Request, params: ConfRequestModel +) -> PydanticResponse[TokenResponseModel]: + """ + Generates and returns a token which will be used as an authentication credential for + /v2/proxy/{token}/{session}/add request. 
+ """ + log: LoggerAdapter = request["log"] + + root_ctx: RootContext = request.app["_root.context"] + + assert params.session.id and params.session.access_key, "Not meant for inference apps" + + token = jwt.encode( + ensure_json_serializable({ + "login_session_token": params.login_session_token, + "kernel_host": params.kernel_host, + "kernel_port": params.kernel_port, + "session_id": params.session.id, + "user_uuid": params.session.user_uuid, + "group_id": params.session.group_id, + "access_key": params.session.access_key, + "domain_name": params.session.domain_name, + }), + root_ctx.local_config.wsproxy.jwt_encrypt_key, + ) + log.debug("built token with body {}", params.model_dump()) + + return PydanticResponse(TokenResponseModel(token=token)) + + +async def init(app: web.Application) -> None: + pass + + +async def shutdown(app: web.Application) -> None: + pass + + +def create_app( + default_cors_options: CORSOptions, +) -> tuple[web.Application, Iterable[WebMiddleware]]: + app = web.Application() + app["prefix"] = "v2/conf" + app.on_startup.append(init) + app.on_shutdown.append(shutdown) + cors = aiohttp_cors.setup(app, defaults=default_cors_options) + root_resource = cors.add(app.router.add_resource(r"")) + cors.add(root_resource.add_route("POST", conf_v2)) + return app, [] diff --git a/src/ai/backend/wsproxy/api/endpoint.py b/src/ai/backend/wsproxy/api/endpoint.py new file mode 100644 index 00000000000..51af388e2e9 --- /dev/null +++ b/src/ai/backend/wsproxy/api/endpoint.py @@ -0,0 +1,205 @@ +import textwrap +from datetime import datetime +from typing import Annotated, Iterable +from uuid import UUID + +import aiohttp_cors +import jwt +from aiohttp import web +from pydantic import AnyUrl, BaseModel, Field + +from ai.backend.wsproxy.exceptions import ObjectNotFound +from ai.backend.wsproxy.types import ( + AppMode, + CORSOptions, + EndpointConfig, + ProxyProtocol, + PydanticResponse, + RouteInfo, + WebMiddleware, +) + +from ..defs import RootContext +from ..registry import add_circuit +from ..types import SessionConfig +from .types import StubResponseModel +from .utils import ( + auth_required, + pydantic_api_handler, + pydantic_api_response_handler, +) + + +class EndpointTagConfig(BaseModel): + session: SessionConfig + endpoint: EndpointConfig + + +class InferenceAppConfig(BaseModel): + session_id: UUID + kernel_host: str + kernel_port: int + protocol: Annotated[ProxyProtocol, Field(default=ProxyProtocol.HTTP)] + traffic_ratio: Annotated[float, Field(ge=0.0, le=1.0, default=1.0)] + + +class EndpointCreationRequestModel(BaseModel): + service_name: Annotated[str, Field(description="Name of the model service.")] + tags: Annotated[ + EndpointTagConfig, + Field( + description="Metadata of target model service and dependent sessions.", + ), + ] + apps: Annotated[ + dict[str, list[InferenceAppConfig]], + Field( + description=textwrap.dedent( + """ + key-value pair of available applications exposed by requested endpoint. + Key should be name of the app, and value as list of host-port pairs app is bound to. + """ + ), + ), + ] + open_to_public: Annotated[ + bool, + Field( + default=False, + description=textwrap.dedent( + """ + If set to true, AppProxy will require an API token (which can be obtained from `generate_endpoint_api_token` request) + fullfilled at request header. 
+ """ + ), + ), + ] + + port: Annotated[int | None, Field(default=None, description="Preferred port number.")] + subdomain: Annotated[str | None, Field(default=None, description="Preferred subdomain name.")] + + +class EndpointCreationResponseModel(BaseModel): + endpoint: AnyUrl + + +@auth_required("manager") +@pydantic_api_handler(EndpointCreationRequestModel) +async def create_or_update_endpoint( + request: web.Request, params: EndpointCreationRequestModel +) -> PydanticResponse[EndpointCreationResponseModel]: + """ + Creates or updates an inference circuit. + """ + root_ctx: RootContext = request.app["_root.context"] + endpoint_id = UUID(request.match_info["endpoint_id"]) + + app_names = list(params.apps.keys()) + if len(app_names) > 0: + app = list(params.apps.keys())[0] + routes = [RouteInfo(**r.model_dump()) for r in params.apps[app]] + else: + app = "" + routes = [] + + try: + circuit = root_ctx.proxy_frontend.get_circuit_by_endpoint_id(endpoint_id) + circuit.route_info = routes + circuit.session_ids = [r.session_id for r in routes] + circuit.open_to_public = params.open_to_public + await root_ctx.proxy_frontend.update_circuit_route_info(circuit, routes) + except ObjectNotFound: + circuit = await add_circuit( + root_ctx, + params.tags.session, + params.tags.endpoint, + app, + params.apps[app][0].protocol if app else ProxyProtocol.HTTP, + AppMode.INFERENCE, + routes, + open_to_public=params.open_to_public, + ) + endpoint_url = f"http://{root_ctx.local_config.wsproxy.advertised_host}:{circuit.port}" + + return PydanticResponse(EndpointCreationResponseModel(endpoint=AnyUrl(endpoint_url))) + + +@auth_required("manager") +@pydantic_api_response_handler +async def remove_endpoint(request: web.Request) -> PydanticResponse[StubResponseModel]: + """ + Deassociates inference circuit from system. + """ + root_ctx: RootContext = request.app["_root.context"] + + endpoint_id = UUID(request.match_info["endpoint_id"]) + + circuit = root_ctx.proxy_frontend.get_circuit_by_endpoint_id(endpoint_id) + await root_ctx.proxy_frontend.break_circuit(circuit) + + return PydanticResponse(StubResponseModel(success=True)) + + +class EndpointAPITokenGenerationRequestModel(BaseModel): + user_uuid: UUID + """ + Token requester's user UUID. + """ + exp: datetime + """ + Expiration date of token. + """ + + +class EndpointAPITokenResponseModel(BaseModel): + token: str + + +@auth_required("manager") +@pydantic_api_handler(EndpointAPITokenGenerationRequestModel) +async def generate_endpoint_api_token( + request: web.Request, params: EndpointAPITokenGenerationRequestModel +) -> PydanticResponse[EndpointAPITokenResponseModel]: + """ + Creates and returns API token required for execution of model service apps hosted by AppProxy. + This API is meant to be called from Backend.AI manager rather than model service callee itself. 
+ """ + root_ctx: RootContext = request.app["_root.context"] + + endpoint_id = UUID(request.match_info["endpoint_id"]) + + circuit = root_ctx.proxy_frontend.get_circuit_by_endpoint_id(endpoint_id) + await root_ctx.proxy_frontend.break_circuit(circuit) + payload = circuit.model_dump(mode="json") + payload["config"] = {} + payload["app_url"] = f"http://{root_ctx.local_config.wsproxy.advertised_host}:{circuit.port}" + payload["user"] = str(params.user_uuid) + payload["exp"] = params.exp + encoded_jwt = jwt.encode( + payload, root_ctx.local_config.wsproxy.jwt_encrypt_key, algorithm="HS256" + ) + return PydanticResponse(EndpointAPITokenResponseModel(token=encoded_jwt)) + + +async def init(app: web.Application) -> None: + pass + + +async def shutdown(app: web.Application) -> None: + pass + + +def create_app( + default_cors_options: CORSOptions, +) -> tuple[web.Application, Iterable[WebMiddleware]]: + app = web.Application() + app["prefix"] = "v2/endpoints" + app.on_startup.append(init) + app.on_shutdown.append(shutdown) + cors = aiohttp_cors.setup(app, defaults=default_cors_options) + add_route = app.router.add_route + cors.add(app.router.add_resource(r"")) + cors.add(add_route("POST", "/{endpoint_id}", create_or_update_endpoint)) + cors.add(add_route("DELETE", "/{endpoint_id}", remove_endpoint)) + cors.add(add_route("POST", "/{endpoint_id}/token", generate_endpoint_api_token)) + return app, [] diff --git a/src/ai/backend/wsproxy/api/proxy.py b/src/ai/backend/wsproxy/api/proxy.py new file mode 100644 index 00000000000..b8c62710eff --- /dev/null +++ b/src/ai/backend/wsproxy/api/proxy.py @@ -0,0 +1,277 @@ +import urllib.parse +from logging import LoggerAdapter +from typing import Annotated, Iterable +from uuid import UUID + +import aiohttp_cors +import jwt +from aiohttp import web +from pydantic import AnyUrl, BaseModel, Field + +from ..defs import RootContext +from ..exceptions import ( + GenericForbidden, + InvalidCredentials, + ObjectNotFound, +) +from ..registry import add_circuit +from ..types import ( + AppMode, + Circuit, + CORSOptions, + ProxyProtocol, + PydanticResponse, + RouteInfo, + SessionConfig, + WebMiddleware, +) +from ..utils import is_permit_valid, mime_match +from .types import ConfRequestModel, StubResponseModel +from .utils import pydantic_api_handler, pydantic_api_response_handler + + +class AddRequestModel(BaseModel): + app: str + protocol: ProxyProtocol + envs: Annotated[dict[str, str | int | None], Field(default={})] + args: Annotated[str | None, Field(default=None)] + open_to_public: Annotated[bool, Field(default=False)] + allowed_client_ips: Annotated[str | None, Field(default=None)] + redirect: Annotated[str, Field(default="")] + no_reuse: Annotated[bool, Field(default=False)] + + port: Annotated[int | None, Field(default=None)] + subdomain: Annotated[str | None, Field(default=None)] + + +class ProxyRequestModel(AddRequestModel): + token: str + session_id: UUID + + +class AddResponseModel(BaseModel): + code: int + url: AnyUrl + + +class ProxyResponseModel(BaseModel): + redirect_url: AnyUrl + reuse: bool + + +class TokenBodyModel(ConfRequestModel): + exp: int + + +@pydantic_api_response_handler +async def check_session_existence(request: web.Request) -> PydanticResponse[StubResponseModel]: + root_ctx: RootContext = request.app["_root.context"] + try: + session_id = UUID(request.match_info["session_id"]) + except ValueError: + raise ObjectNotFound(object_name="Circuit") + + token = request.match_info["token"] + + for _, circuit in root_ctx.proxy_frontend.circuits.items(): 
+ if session_id in circuit.session_ids and ( + token == circuit.access_key or token == circuit.user_id + ): + break + else: + raise ObjectNotFound(object_name="Circuit") + + return PydanticResponse(StubResponseModel(success=True)) + + +@pydantic_api_response_handler +async def delete_circuit_by_session(request: web.Request) -> PydanticResponse[StubResponseModel]: + root_ctx: RootContext = request.app["_root.context"] + try: + session_id = UUID(request.match_info["session_id"]) + except ValueError: + raise ObjectNotFound(object_name="Circuit") + + token = request.match_info["token"] + + for _, circuit in root_ctx.proxy_frontend.circuits.items(): + if session_id in circuit.session_ids and ( + token == circuit.access_key or token == str(circuit.user_id) + ): + break + else: + raise ObjectNotFound(object_name="Circuit") + permit_key = request.query.get("permit_key") + if ( + not permit_key + or not circuit.user_id + or ( + not is_permit_valid( + root_ctx.local_config.wsproxy.permit_hash_key, circuit.user_id, permit_key + ) + ) + ): + raise GenericForbidden + + await root_ctx.proxy_frontend.break_circuit(circuit) + return PydanticResponse(StubResponseModel(success=True)) + + +@pydantic_api_handler(AddRequestModel, is_deprecated=True) +async def add(request: web.Request, params: AddRequestModel) -> PydanticResponse[AddResponseModel]: + """ + Deprecated: only for legacy applications. Just call `proxy` API directly. + Returns URL to AppProxy's `proxy` API handler. + """ + root_ctx: RootContext = request.app["_root.context"] + + config = root_ctx.local_config.wsproxy + base_url = ( + f"http://{config.advertised_host}:{config.advertised_api_port or config.bind_api_port}" + ) + qdict = { + **params.model_dump(mode="json", exclude_defaults=True), + "token": request.match_info["token"], + "session_id": request.match_info["session_id"], + } + return PydanticResponse( + AddResponseModel( + code=200, url=AnyUrl(f"{base_url}/v2/proxy/auth?{urllib.parse.urlencode(qdict)}") + ), + ) + + +@pydantic_api_handler(ProxyRequestModel) +async def proxy( + request: web.Request, params: ProxyRequestModel +) -> PydanticResponse[ProxyResponseModel] | web.HTTPPermanentRedirect: + """ + Assigns worker to host proxy app and starts proxy process. + When `Accept` HTTP header is set to `application/json` access information to worker will be handed out inside response body; + otherwise wsproxy will try to automatically redirect callee via `Location: ` response header. 
+ """ + log: LoggerAdapter = request["log"] + + existing_circuit: Circuit | None = None + circuit: Circuit + reuse = False + + root_ctx: RootContext = request.app["_root.context"] + token_str = params.token + session_id = params.session_id + + token = jwt.decode( + token_str, root_ctx.local_config.wsproxy.jwt_encrypt_key, algorithms=["HS256"] + ) + + if token["session_id"] != str(session_id): + log.warn( + "User requested to create app of session {} but token authorizes session {}", + session_id, + token["session_id"], + ) + raise InvalidCredentials + + for _, circuit in root_ctx.proxy_frontend.circuits.items(): + if ( + token["session_id"] in circuit.session_ids + and token["app"] == circuit.app + and token.get("open_to_public", False) == circuit.open_to_public + and token["allowed_client_ip"] == circuit.allowed_client_ips + ) and not params.no_reuse: + existing_circuit = circuit + break + + if existing_circuit: + reuse = True + routes = existing_circuit.route_info + circuit = existing_circuit + else: + routes = [ + RouteInfo( + session_id=token["session_id"], + session_name=None, + kernel_host=token["kernel_host"], + kernel_port=token["kernel_port"], + protocol=params.protocol, + traffic_ratio=1.0, + ) + ] + + log.debug("protocol: {} ({})", params.protocol, type(params.protocol)) + if params.protocol == ProxyProtocol.PREOPEN: + log.debug("overriding PREOPEN to HTTP") + params.protocol = ProxyProtocol.HTTP + + circuit = await add_circuit( + root_ctx, + SessionConfig( + id=token["session_id"], + user_uuid=token["user_uuid"], + group_id=token["group_id"], + access_key=token["access_key"], + domain_name=token["domain_name"], + ), + None, + params.app, + params.protocol, + AppMode.INTERACTIVE, + routes, + envs=params.envs, + args=params.args, + open_to_public=params.open_to_public, + allowed_client_ips=params.allowed_client_ips, + ) + log.debug("created new circuit") + + token_to_generate_body = { + "version": "v2", # TODO: add support for v1 + "redirect": params.redirect, + "circuit": str(circuit.id), + } + qdict = { + "token": jwt.encode(token_to_generate_body, root_ctx.local_config.wsproxy.jwt_encrypt_key), + } + + port = ( + root_ctx.local_config.wsproxy.advertised_api_port + or root_ctx.local_config.wsproxy.bind_api_port + ) + app_url = f"http://{root_ctx.local_config.wsproxy.advertised_host}:{port}/setup?{urllib.parse.urlencode(qdict)}" + log.debug("Redirect URL created: {}", app_url) + + if mime_match(request.headers.get("accept", "text/html"), "application/json", strict=True): + # Web browsers block redirect between cross-origins if Access-Control-Allow-Origin value is set to a concrete Origin instead of wildcard; + # Hence we need to send "*" as allowed origin manually, instead of benefiting from aiohttp-cors + return PydanticResponse( + ProxyResponseModel( + redirect_url=AnyUrl(app_url), + # Current version of WebUI always expects this to be False for TCP protocols + reuse=reuse if params.protocol != ProxyProtocol.TCP else False, + ), + ) + else: + return web.HTTPPermanentRedirect(app_url) + + +async def init(app: web.Application) -> None: + pass + + +async def shutdown(app: web.Application) -> None: + pass + + +def create_app( + default_cors_options: CORSOptions, +) -> tuple[web.Application, Iterable[WebMiddleware]]: + app = web.Application() + app["prefix"] = "v2/proxy" + app.on_startup.append(init) + app.on_shutdown.append(shutdown) + cors = aiohttp_cors.setup(app, defaults=default_cors_options) + cors.add(app.router.add_route("GET", "/{token}/{session_id}", 
check_session_existence)) + cors.add(app.router.add_route("GET", "/{token}/{session_id}/add", add)) + cors.add(app.router.add_route("GET", "/{token}/{session_id}/delete", delete_circuit_by_session)) + cors.add(app.router.add_route("GET", "/auth", proxy)) + return app, [] diff --git a/src/ai/backend/wsproxy/api/setup.py b/src/ai/backend/wsproxy/api/setup.py new file mode 100644 index 00000000000..ac21a9e42b6 --- /dev/null +++ b/src/ai/backend/wsproxy/api/setup.py @@ -0,0 +1,135 @@ +import urllib.parse +from logging import LoggerAdapter +from typing import Iterable +from uuid import UUID + +import jwt +from aiohttp import web +from pydantic import AnyUrl, BaseModel + +from ..config import ServerConfig +from ..defs import RootContext +from ..exceptions import ( + InvalidAPIParameters, +) +from ..types import ( + PERMIT_COOKIE_NAME, + Circuit, + CORSOptions, + InteractiveAppInfo, + ProxyProtocol, + PydanticResponse, + WebMiddleware, +) +from ..utils import calculate_permit_hash +from .utils import pydantic_api_handler + + +def generate_proxy_url(local_config: ServerConfig, protocol: str, circuit: Circuit) -> str: + config = local_config.wsproxy + if config.advertised_proxy_port_range: + idx = config.bind_proxy_port_range.index(circuit.port) + port = config.advertised_proxy_port_range[idx] + else: + port = circuit.port + return f"{protocol}://{config.advertised_host}:{port}" + + +class ProxySetupRequestModel(BaseModel): + token: str + + +class ProxySetupResponseModel(BaseModel): + redirect: AnyUrl + redirectURI: AnyUrl + + +@pydantic_api_handler(ProxySetupRequestModel) +async def setup( + request: web.Request, params: ProxySetupRequestModel +) -> web.StreamResponse | PydanticResponse[ProxySetupResponseModel]: + log: LoggerAdapter = request["log"] + + try: + root_ctx: RootContext = request.app["_root.context"] + jwt_body = jwt.decode( + params.token, root_ctx.local_config.wsproxy.jwt_encrypt_key, algorithms=["HS256"] + ) + requested_circuit_id = UUID(jwt_body["circuit"]) + + circuit = root_ctx.proxy_frontend.get_circuit_by_id(requested_circuit_id) + + if not isinstance(circuit.app_info, InteractiveAppInfo): + raise InvalidAPIParameters("E20011: Not supported for inference apps") + + # Web browsers block redirect between cross-origins if Access-Control-Allow-Origin value is set to a concrete Origin instead of wildcard; + # Hence we need to send "*" as allowed origin manually, instead of benefiting from aiohttp-cors + cors_headers = { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "*", + "Access-Control-Expose-Headers": "*", + } + match circuit.protocol: + case ProxyProtocol.HTTP: + protocol = "http" + response = web.HTTPPermanentRedirect( + generate_proxy_url(root_ctx.local_config, protocol, circuit), + headers=cors_headers, + ) + response.set_cookie( + PERMIT_COOKIE_NAME, + calculate_permit_hash( + root_ctx.local_config.wsproxy.permit_hash_key, circuit.app_info.user_id + ), + ) + return response + case ProxyProtocol.TCP: + protocol = "tcp" + queryparams = { + "directTCP": "true", + "auth": params.token, + "proto": protocol, + "gateway": generate_proxy_url(root_ctx.local_config, protocol, circuit), + } + if jwt_body["redirect"]: + return web.HTTPPermanentRedirect( + f"http://localhost:45678/start?{urllib.parse.urlencode(queryparams)}", + headers=cors_headers, + ) + else: + return PydanticResponse( + ProxySetupResponseModel( + redirect=AnyUrl( + f"http://localhost:45678/start?{urllib.parse.urlencode(queryparams)}" + ), + redirectURI=AnyUrl( + 
f"http://localhost:45678/start?{urllib.parse.urlencode(queryparams)}" + ), + ), + headers=cors_headers, + ) + case _: + raise InvalidAPIParameters("E20002: Protocol not available as interactive app") + except: + log.exception("") + raise + + +async def init(app: web.Application) -> None: + pass + + +async def shutdown(app: web.Application) -> None: + pass + + +def create_app( + default_cors_options: CORSOptions, +) -> tuple[web.Application, Iterable[WebMiddleware]]: + app = web.Application() + app["prefix"] = "setup" + app.on_startup.append(init) + app.on_shutdown.append(shutdown) + add_route = app.router.add_route + add_route("GET", "", setup) + return app, [] diff --git a/src/ai/backend/wsproxy/api/types.py b/src/ai/backend/wsproxy/api/types.py new file mode 100644 index 00000000000..4d8c3efa393 --- /dev/null +++ b/src/ai/backend/wsproxy/api/types.py @@ -0,0 +1,36 @@ +from typing import Annotated +from uuid import UUID + +from pydantic import BaseModel, Field + +from ..types import ( + Circuit, + FrontendMode, + SessionConfig, +) + + +class SlotModel(BaseModel): + frontend_mode: FrontendMode + in_use: bool + port: Annotated[int | None, Field(default=None)] + subdomain: Annotated[str | None, Field(default=None)] + circuit_id: Annotated[UUID | None, Field(default=None)] + """ + ID of circuit slot is hosting. + """ + + +class StubResponseModel(BaseModel): + success: Annotated[bool, Field(default=True)] + + +class CircuitListResponseModel(BaseModel): + circuits: list[Circuit] + + +class ConfRequestModel(BaseModel): + login_session_token: str | None + kernel_host: str + kernel_port: int + session: SessionConfig diff --git a/src/ai/backend/wsproxy/api/utils.py b/src/ai/backend/wsproxy/api/utils.py new file mode 100644 index 00000000000..da5bc8cc835 --- /dev/null +++ b/src/ai/backend/wsproxy/api/utils.py @@ -0,0 +1,254 @@ +import functools +import inspect +import json +import time +from collections import defaultdict +from typing import ( + Any, + Awaitable, + Callable, + Hashable, + Literal, + Mapping, + TypeAlias, + TypeVar, +) + +import yaml +from aiohttp import web, web_response +from aiohttp.typedefs import Handler +from pydantic import BaseModel, ValidationError + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.defs import RootContext +from ai.backend.wsproxy.exceptions import AuthorizationFailed + +from ..exceptions import InvalidAPIParameters +from ..types import PydanticResponse + + +def auth_required(scope: Literal["manager"] | Literal["worker"]) -> Callable[[Handler], Handler]: + def wrap(handler: Handler) -> Handler: + @functools.wraps(handler) + async def wrapped(request: web.Request, *args, **kwargs): + root_ctx: RootContext = request.app["_root.context"] + permitted_token = root_ctx.local_config.wsproxy.api_secret + permitted_header_values = ( + permitted_token, + f"Bearer {permitted_token}", + f"BackendAI {permitted_token}", + ) + token_to_evaluate = request.headers.get("X-BackendAI-Token") + if token_to_evaluate not in permitted_header_values: + raise AuthorizationFailed("Unauthorized access") + return await handler(request, *args, **kwargs) + + original_attrs = getattr(handler, "_backend_attrs", {}) + for k, v in original_attrs.items(): + set_handler_attr(wrapped, k, v) + + set_handler_attr(wrapped, "auth_scope", scope) + return wrapped + + return wrap + + +# FIXME: merge majority of common definitions to ai.backend.common when ready + +_danger_words = ["password", "passwd", "secret"] + + +def set_handler_attr(func, key, value): + attrs = 
getattr(func, "_backend_attrs", None) + if attrs is None: + attrs = {} + attrs[key] = value + setattr(func, "_backend_attrs", attrs) + + +def get_handler_attr(request, key, default=None): + # When used in the aiohttp server-side codes, we should use + # request.match_info.hanlder instead of handler passed to the middleware + # functions because aiohttp wraps this original handler with functools.partial + # multiple times to implement its internal middleware processing. + attrs = getattr(request.match_info.handler, "_backend_attrs", None) + if attrs is not None: + return attrs.get(key, default) + return default + + +def mask_sensitive_keys(data: Mapping[str, Any]) -> Mapping[str, Any]: + """ + Returns a new cloned mapping by masking the values of + sensitive keys with "***" from the given mapping. + """ + sanitized = dict() + for k, v in data.items(): + if any((w in k.lower()) for w in _danger_words): + sanitized[k] = "***" + else: + sanitized[k] = v + return sanitized + + +_burst_last_call: float = 0.0 +_burst_times: dict[Hashable, float] = dict() +_burst_counts: dict[Hashable, int] = defaultdict(int) + + +async def call_non_bursty( + key: Hashable, + coro: Callable[[], Any], + *, + max_bursts: int = 64, + max_idle: int | float = 100.0, +): + """ + Execute a coroutine once upon max_bursts bursty invocations or max_idle + milliseconds after bursts smaller than max_bursts. + """ + global _burst_last_call, _burst_times, _burst_counts + if inspect.iscoroutine(coro): + # Coroutine objects may not be called before garbage-collected + # as this function throttles the frequency of invocation. + # That will generate a bogus warning by the asyncio's debug facility. + raise TypeError("You must pass coroutine function, not coroutine object.") + now = time.monotonic() + + if now - _burst_last_call > 3.0: + # garbage-collect keys + cleaned_keys = [] + for k, tick in _burst_times.items(): + if now - tick > (max_idle / 1e3): + cleaned_keys.append(k) + for k in cleaned_keys: + del _burst_times[k] + _burst_counts.pop(k, None) + + last_called = _burst_times.get(key, 0) + _burst_times[key] = now + _burst_last_call = now + invoke = False + + if now - last_called > (max_idle / 1e3): + invoke = True + _burst_counts.pop(key, None) + else: + _burst_counts[key] += 1 + if _burst_counts[key] >= max_bursts: + invoke = True + del _burst_counts[key] + + if invoke: + if inspect.iscoroutinefunction(coro): + return await coro() + else: + return coro() + + +TAnyResponse = TypeVar("TAnyResponse", bound=web.StreamResponse) + +TParamModel = TypeVar("TParamModel", bound=BaseModel) +TQueryModel = TypeVar("TQueryModel", bound=BaseModel) +TResponseModel = TypeVar("TResponseModel", bound=BaseModel) + +THandlerFuncWithoutParam: TypeAlias = Callable[ + [web.Request], Awaitable[PydanticResponse | TAnyResponse] +] +THandlerFuncWithParam: TypeAlias = Callable[ + [web.Request, TParamModel], Awaitable[PydanticResponse | TAnyResponse] +] + + +def ensure_stream_response_type( + response: PydanticResponse | TAnyResponse, +) -> web.StreamResponse: + json_body: Any + match response: + case PydanticResponse(): + match response.response: + case BaseModel(): + json_body = response.response.model_dump(mode="json") + case _: + raise RuntimeError(f"Unsupported model type ({type(response.response)})") + return web.json_response(json_body, headers=response.headers, status=response.status) + case web_response.StreamResponse(): + return response + case _: + raise RuntimeError(f"Unsupported response type ({type(response)})") + + +def 
pydantic_api_response_handler( + handler: THandlerFuncWithoutParam, + is_deprecated=False, +) -> Handler: + """ + Only for API handlers which does not require request body. + For handlers with params to consume use @pydantic_params_api_handler() or + @check_api_params() decorator (only when request param is validated with trafaret). + """ + + @functools.wraps(handler) + async def wrapped( + request: web.Request, + *args, + **kwargs, + ) -> web.StreamResponse: + response = await handler(request, *args, **kwargs) + return ensure_stream_response_type(response) + + set_handler_attr(wrapped, "deprecated", is_deprecated) + return wrapped + + +def pydantic_api_handler( + checker: type[TParamModel], + loads: Callable[[str], Any] | None = None, + query_param_checker: type[TQueryModel] | None = None, + is_deprecated=False, +) -> Callable[[THandlerFuncWithParam], Handler]: + def wrap( + handler: THandlerFuncWithParam, + ) -> Handler: + @functools.wraps(handler) + async def wrapped( + request: web.Request, + *args, + **kwargs, + ) -> web.StreamResponse: + orig_params: Any + body: str = "" + log: BraceStyleAdapter = request["log"] + try: + body_exists = request.can_read_body + if body_exists: + body = await request.text() + if request.content_type == "text/yaml": + orig_params = yaml.load(body, Loader=yaml.BaseLoader) + else: + orig_params = (loads or json.loads)(body) + else: + orig_params = dict(request.query) + stripped_params = orig_params.copy() + log.debug("stripped raw params: {}", mask_sensitive_keys(stripped_params)) + checked_params = checker.model_validate(stripped_params) + if body_exists and query_param_checker: + query_params = query_param_checker.model_validate(request.query) + kwargs["query"] = query_params + except (json.decoder.JSONDecodeError, yaml.YAMLError, yaml.MarkedYAMLError): + raise InvalidAPIParameters("Malformed body") + except ValidationError as e: + raise InvalidAPIParameters("Input validation error", extra_data=e.errors()) + result = await handler(request, checked_params, *args, **kwargs) + return ensure_stream_response_type(result) + + original_attrs = getattr(handler, "_backend_attrs", {}) + for k, v in original_attrs.items(): + set_handler_attr(wrapped, k, v) + + set_handler_attr(wrapped, "request_scheme", checker) + set_handler_attr(wrapped, "deprecated", is_deprecated) + + return wrapped + + return wrap diff --git a/src/ai/backend/wsproxy/cli/BUILD b/src/ai/backend/wsproxy/cli/BUILD new file mode 100644 index 00000000000..73574424040 --- /dev/null +++ b/src/ai/backend/wsproxy/cli/BUILD @@ -0,0 +1 @@ +python_sources(name="src") diff --git a/src/ai/backend/wsproxy/cli/__main__.py b/src/ai/backend/wsproxy/cli/__main__.py new file mode 100644 index 00000000000..e56d2627916 --- /dev/null +++ b/src/ai/backend/wsproxy/cli/__main__.py @@ -0,0 +1,96 @@ +import asyncio +import importlib +import json +from pathlib import Path +from typing import Any + +import aiohttp_cors +import click +import tomlkit +from aiohttp import web +from setproctitle import setproctitle + +from ai.backend.common.types import LogSeverity + +from ..config import ServerConfig, generate_example_json +from ..openapi import generate_openapi +from ..utils import ensure_json_serializable + + +@click.group(invoke_without_command=False, context_settings={"help_option_names": ["-h", "--help"]}) +@click.option( + "--log-level", + type=click.Choice([*LogSeverity.__members__.keys()], case_sensitive=False), + default="INFO", + help="Set the logging verbosity level", +) +@click.pass_context +def main( + ctx: 
click.Context, + log_level: str, +) -> None: + """ + Backend.AI WSProxy CLI + """ + setproctitle("backend.ai: wsproxy.cli") + + +@main.command() +@click.option( + "--output", + "-o", + default="-", + type=click.Path(dir_okay=False, writable=True), + help="Output file path (default: stdout)", +) +def generate_example_configuration(output: Path) -> None: + """ + Generates example TOML configuration file for Backend.AI Proxy Coordinator. + """ + generated_example = generate_example_json(ServerConfig) + if output == "-" or output is None: + print(tomlkit.dumps(ensure_json_serializable(generated_example))) + else: + with open(output, mode="w") as fw: + fw.write(tomlkit.dumps(ensure_json_serializable(generated_example))) + + +async def _generate() -> dict[str, Any]: + from ..server import global_subapp_pkgs + + cors_options = { + "*": aiohttp_cors.ResourceOptions( + allow_credentials=False, expose_headers="*", allow_headers="*" + ), + } + + subapps: list[web.Application] = [] + for subapp in global_subapp_pkgs: + pkg = importlib.import_module("ai.backend.wsproxy.api" + subapp) + app, _ = pkg.create_app(cors_options) + subapps.append(app) + return generate_openapi("Proxy Coordinator", subapps, verbose=True) + + +@main.command() +@click.option( + "--output", + "-o", + default="-", + type=click.Path(dir_okay=False, writable=True), + help="Output file path (default: stdout)", +) +def generate_openapi_spec(output: Path) -> None: + """ + Generates OpenAPI specification of Backend.AI API. + """ + openapi = asyncio.run(_generate()) + if output == "-" or output is None: + print(json.dumps(openapi, ensure_ascii=False, indent=2)) + else: + with open(output, mode="w") as fw: + fw.write(json.dumps(openapi, ensure_ascii=False, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/src/ai/backend/wsproxy/cli/context.py b/src/ai/backend/wsproxy/cli/context.py new file mode 100644 index 00000000000..52f84db8ba0 --- /dev/null +++ b/src/ai/backend/wsproxy/cli/context.py @@ -0,0 +1,41 @@ +from pathlib import Path +from typing import Self + +import click + +from ai.backend.common.logging import AbstractLogger, LocalLogger + +from ..config import ServerConfig +from ..config import load as load_config + + +class CLIContext: + _local_config: ServerConfig | None + _logger: AbstractLogger + + def __init__(self, config_path: Path, log_level: str) -> None: + self.config_path = config_path + self.log_level = log_level + self._local_config = None + + @property + def local_config(self) -> ServerConfig: + # Lazy-load the configuration only when requested. + if self._local_config is None: + self._local_config = load_config(self.config_path, self.log_level) + return self._local_config + + def __enter__(self) -> Self: + # The "start-server" command is injected by ai.backend.cli from the entrypoint + # and it has its own multi-process-aware logging initialization. + # If we duplicate the local logging with it, the process termination may hang. 
+ click_ctx = click.get_current_context() + if click_ctx.invoked_subcommand != "start-server": + self._logger = LocalLogger({}) + self._logger.__enter__() + return self + + def __exit__(self, *exc_info) -> None: + click_ctx = click.get_current_context() + if click_ctx.invoked_subcommand != "start-server": + self._logger.__exit__() diff --git a/src/ai/backend/wsproxy/config.py b/src/ai/backend/wsproxy/config.py new file mode 100644 index 00000000000..8526ad8b90f --- /dev/null +++ b/src/ai/backend/wsproxy/config.py @@ -0,0 +1,514 @@ +import enum +import os +import pwd +import socket +import sys +import types +import typing +from dataclasses import dataclass +from pathlib import Path +from pprint import pformat +from typing import Annotated, Any + +import click +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + Field, + GetCoreSchemaHandler, + GetJsonSchemaHandler, + ValidationError, +) +from pydantic.json_schema import JsonSchemaValue +from pydantic_core import PydanticUndefined, core_schema + +from ai.backend.common import config + +from .types import EventLoopType, ProxyProtocol + +_file_perm = (Path(__file__).parent / "server.py").stat() + + +class BaseSchema(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + from_attributes=True, + use_enum_values=True, + ) + + +class HostPortPair(BaseSchema): + host: Annotated[str, Field(examples=["127.0.0.1"])] + port: Annotated[int, Field(gt=0, lt=65536, examples=[8201])] + + def __repr__(self) -> str: + return f"{self.host}:{self.port}" + + def __str__(self) -> str: + return self.__repr__() + + def __getitem__(self, *args) -> int | str: + if args[0] == 0: + return self.host + elif args[0] == 1: + return self.port + else: + raise KeyError(*args) + + +@dataclass +class UserID: + default_uid: int | None = None + + @classmethod + def uid_validator( + cls, + value: int | str | None, + ) -> int: + if value is None: + assert cls.default_uid, "value is None but default_uid not provided" + return cls.default_uid + assert isinstance(value, (int, str)), "value must be an integer" + match value: + case int(): + if value == -1: + return os.getuid() + else: + return value + case str(): + try: + _value = int(value) + if _value == -1: + return os.getuid() + else: + return _value + except ValueError: + try: + return pwd.getpwnam(value).pw_uid + except KeyError: + assert False, f"no such user {value} in system" + + @classmethod + def __get_pydantic_core_schema__( + cls, + _source_type: Any, + _handler: GetCoreSchemaHandler, + ) -> core_schema.CoreSchema: + schema = core_schema.chain_schema([ + core_schema.union_schema([ + core_schema.int_schema(), + core_schema.str_schema(), + ]), + core_schema.no_info_plain_validator_function(cls.uid_validator), + ]) + + return core_schema.json_or_python_schema( + json_schema=schema, + python_schema=core_schema.union_schema([ + # check if it's an instance first before doing any further work + core_schema.union_schema([ + core_schema.is_instance_schema(int), + core_schema.is_instance_schema(str), + ]), + schema, + ]), + serialization=core_schema.plain_serializer_function_ser_schema(int), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, _core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + # Use the same schema that would be used for `int` + return handler( + core_schema.union_schema([ + core_schema.int_schema(), + core_schema.str_schema(), + ]) + ) + + +@dataclass +class GroupID: + default_gid: int | None = None + + @classmethod + def uid_validator( 
+ cls, + value: int | str | None, + ) -> int: + if value is None: + assert cls.default_gid, "value is None but default_gid not provided" + assert isinstance(value, (int, str)), "value must be an integer" + match value: + case int(): + if value == -1: + return os.getgid() + else: + return value + case str(): + try: + _value = int(value) + if _value == -1: + return os.getgid() + else: + return _value + except ValueError: + try: + return pwd.getpwnam(value).pw_gid + except KeyError: + assert False, f"no such user {value} in system" + + @classmethod + def __get_pydantic_core_schema__( + cls, + _source_type: Any, + _handler: GetCoreSchemaHandler, + ) -> core_schema.CoreSchema: + schema = core_schema.chain_schema([ + core_schema.union_schema([ + core_schema.int_schema(), + core_schema.str_schema(), + ]), + core_schema.no_info_plain_validator_function(cls.uid_validator), + ]) + + return core_schema.json_or_python_schema( + json_schema=schema, + python_schema=core_schema.union_schema([ + # check if it's an instance first before doing any further work + core_schema.union_schema([ + core_schema.is_instance_schema(int), + core_schema.is_instance_schema(str), + ]), + schema, + ]), + serialization=core_schema.plain_serializer_function_ser_schema(int), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, _core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + # Use the same schema that would be used for `int` + return handler( + core_schema.union_schema([ + core_schema.int_schema(), + core_schema.str_schema(), + ]) + ) + + +class LogLevel(str, enum.Enum): + DEBUG = "DEBUG" + INFO = "INFO" + WARNING = "WARNING" + ERROR = "ERROR" + CRITICAL = "CRITICAL" + NOTSET = "NOTSET" + + +class LogFormat(str, enum.Enum): + SIMPLE = "simple" + VERBOSE = "verbose" + + +class LogDriver(str, enum.Enum): + CONSOLE = "console" + LOGSTASH = "logstash" + FILE = "file" + GRAYLOG = "graylog" + + +class LogstashProtocol(str, enum.Enum): + ZMQ_PUSH = "zmq.push" + ZMQ_PUB = "zmq.pub" + TCP = "tcp" + UDP = "udp" + + +default_pkg_ns = {"": "WARNING", "ai.backend": "DEBUG", "tests": "DEBUG", "aiohttp": "INFO"} + + +class ConsoleLogConfig(BaseSchema): + colored: Annotated[ + bool | None, Field(default=None, description="Opt to print colorized log.", examples=[True]) + ] + format: Annotated[ + LogFormat, Field(default=LogFormat.VERBOSE, description="Determine verbosity of log.") + ] + + +class FileLogConfig(BaseSchema): + path: Annotated[Path, Field(description="Path to store log.", examples=["/var/log/backend.ai"])] + filename: Annotated[str, Field(description="Log file name.", examples=["wsproxy.log"])] + backup_count: Annotated[ + int, Field(description="Number of outdated log files to retain.", default=5) + ] + rotation_size: Annotated[ + ByteSize, Field(description="Maximum size for a single log file.", default="10M") + ] + format: Annotated[ + LogFormat, Field(default=LogFormat.VERBOSE, description="Determine verbosity of log.") + ] + + +class LogstashConfig(BaseSchema): + endpoint: Annotated[ + HostPortPair, + Field( + description="Connection information of logstash node.", + examples=[HostPortPair(host="127.0.0.1", port=8001)], + ), + ] + protocol: Annotated[ + LogstashProtocol, + Field( + description="Protocol to communicate with logstash server.", + default=LogstashProtocol.TCP, + ), + ] + ssl_enabled: Annotated[ + bool, Field(description="Use TLS to communicate with logstash server.", default=True) + ] + ssl_verify: Annotated[ + bool, + Field( + description="Verify validity 
of TLS certificate when communicating with logstash.", + default=True, + ), + ] + + +class GraylogConfig(BaseSchema): + host: Annotated[str, Field(description="Graylog hostname.", examples=["127.0.0.1"])] + port: Annotated[int, Field(description="Graylog server port number.", examples=[8000])] + level: Annotated[LogLevel, Field(description="Log level.", default=LogLevel.INFO)] + ssl_verify: Annotated[ + bool, + Field( + description="Verify validity of TLS certificate when communicating with logstash.", + default=True, + ), + ] + ca_certs: Annotated[ + str | None, + Field( + description="Path to Root CA certificate file.", + examples=["/etc/ssl/ca.pem"], + default=None, + ), + ] + keyfile: Annotated[ + str | None, + Field( + description="Path to TLS private key file.", + examples=["/etc/backend.ai/graylog/privkey.pem"], + default=None, + ), + ] + certfile: Annotated[ + str | None, + Field( + description="Path to TLS certificate file.", + examples=["/etc/backend.ai/graylog/cert.pem"], + default=None, + ), + ] + + +class LoggingConfig(BaseSchema): + level: Annotated[LogLevel, Field(default=LogLevel.INFO, description="Log level.")] + pkg_ns: Annotated[ + dict[str, LogLevel], + Field( + description="Override default log level for specific scope of package", + default=default_pkg_ns, + ), + ] + drivers: Annotated[ + list[LogDriver], + Field(default=[LogDriver.CONSOLE], description="Array of log drivers to print."), + ] + console: Annotated[ + ConsoleLogConfig, Field(default=ConsoleLogConfig(colored=None, format=LogFormat.VERBOSE)) + ] + file: Annotated[FileLogConfig | None, Field(default=None)] + logstash: Annotated[LogstashConfig | None, Field(default=None)] + graylog: Annotated[GraylogConfig | None, Field(default=None)] + + +class DebugConfig(BaseSchema): + enabled: Annotated[bool, Field(default=False)] + asyncio: Annotated[bool, Field(default=False)] + enhanced_aiomonitor_task_info: Annotated[bool, Field(default=False)] + log_events: Annotated[bool, Field(default=False)] + + +class WSProxyConfig(BaseSchema): + ipc_base_path: Annotated[ + Path, + Field( + default=Path("/tmp/backend.ai/ipc"), + description="Directory to store temporary UNIX sockets.", + ), + ] + event_loop: Annotated[ + EventLoopType, + Field(default=EventLoopType.ASYNCIO, description="Type of event loop to use."), + ] + pid_file: Annotated[ + Path, + Field( + default=Path(os.devnull), + description="Place to store process PID.", + examples=["/run/backend.ai/wsproxy/wsproxy.pid"], + ), + ] + + id: Annotated[ + str, + Field(default=f"i-{socket.gethostname()}", examples=["i-node01"], description="Node id."), + ] + user: Annotated[ + int, + UserID(default_uid=_file_perm.st_uid), + Field(default=_file_perm.st_uid, description="Process owner."), + ] + group: Annotated[ + int, + GroupID(default_gid=_file_perm.st_gid), + Field(default=_file_perm.st_uid, description="Process group."), + ] + + bind_host: Annotated[ + str, + Field( + default="0.0.0.0", + description="Bind address of the port opened on behalf of wsproxy worker", + ), + ] + advertised_host: Annotated[ + str, Field(examples=["example.com"], description="Hostname to be advertised to client") + ] + + bind_api_port: Annotated[ + int, Field(default=5050, description="Port number to bind for API server") + ] + advertised_api_port: Annotated[ + int | None, + Field(default=None, examples=[15050], description="API port number reachable from client"), + ] + + bind_proxy_port_range: Annotated[ + tuple[int, int], + Field(default=[10200, 10300], description="Port number to bind for 
actual traffic"), + ] + advertised_proxy_port_range: Annotated[ + tuple[int, int] | None, + Field( + default=None, + examples=[[20200, 20300]], + description="Traffic port range reachable from client", + ), + ] + + protocol: Annotated[ + ProxyProtocol, Field(default=ProxyProtocol.HTTP, description="Proxy protocol") + ] + + jwt_encrypt_key: Annotated[ + str, Field(examples=["50M3G00DL00KING53CR3T"], description="JWT encryption key") + ] + permit_hash_key: Annotated[ + str, Field(examples=["50M3G00DL00KING53CR3T"], description="Permit hash key") + ] + + api_secret: Annotated[str, Field(examples=["50M3G00DL00KING53CR3T"], description="API secret")] + + aiomonitor_termui_port: Annotated[ + int, + Field( + gt=0, lt=65536, description="Port number for aiomonitor termui server.", default=48500 + ), + ] + aiomonitor_webui_port: Annotated[ + int, + Field( + gt=0, lt=65536, description="Port number for aiomonitor webui server.", default=49500 + ), + ] + + +class ServerConfig(BaseSchema): + wsproxy: WSProxyConfig + logging: LoggingConfig + debug: DebugConfig + + +def load(config_path: Path | None = None, log_level: str = "INFO") -> ServerConfig: + # Determine where to read configuration. + raw_cfg, _ = config.read_from_file(config_path, "wsproxy") + + config.override_key(raw_cfg, ("debug", "enabled"), log_level == "DEBUG") + config.override_key(raw_cfg, ("logging", "level"), log_level.upper()) + config.override_key(raw_cfg, ("logging", "pkg-ns", "ai.backend"), log_level.upper()) + config.override_key(raw_cfg, ("logging", "pkg-ns", "aiohttp"), log_level.upper()) + + # Validate and fill configurations + # (allow_extra will make configs to be forward-copmatible) + try: + cfg = ServerConfig(**raw_cfg) + if cfg.debug.enabled: + print("== WSProxy configuration ==", file=sys.stderr) + print(pformat(cfg.model_dump()), file=sys.stderr) + except ValidationError as e: + print( + "ConfigurationError: Could not read or validate the manager local config:", + file=sys.stderr, + ) + print(pformat(e), file=sys.stderr) + raise click.Abort() + else: + return cfg + + +class Undefined: + pass + + +class UnsupportedTypeError(RuntimeError): + pass + + +def generate_example_json( + schema: type[BaseSchema] | types.GenericAlias | types.UnionType, parent: list[str] = [] +) -> dict | list: + if isinstance(schema, types.UnionType): + return generate_example_json(typing.get_args(schema)[0], parent=[*parent]) + elif isinstance(schema, types.GenericAlias): + if typing.get_origin(schema) != list: + raise RuntimeError("GenericAlias other than list not supported!") + return [generate_example_json(typing.get_args(schema)[0], parent=[*parent])] + elif issubclass(schema, BaseSchema): + res = {} + for name, info in schema.model_fields.items(): + config_key = [*parent, name] + assert info.annotation + alternative_example = Undefined + if info.examples: + res[name] = info.examples[0] + elif info.default != PydanticUndefined: + alternative_example = info.default + if name not in res: + try: + res[name] = generate_example_json(info.annotation, parent=config_key) + except RuntimeError: + if alternative_example != Undefined: + res[name] = alternative_example + else: + raise + return res + else: + raise UnsupportedTypeError(str(schema)) diff --git a/src/ai/backend/wsproxy/defs.py b/src/ai/backend/wsproxy/defs.py new file mode 100644 index 00000000000..6d18fccd709 --- /dev/null +++ b/src/ai/backend/wsproxy/defs.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING, AsyncContextManager, Callable, TypeAlias +from uuid import UUID + +import 
aiohttp_cors +import attrs + +from .config import ServerConfig + +if TYPE_CHECKING: + from .proxy.frontend.abc import AbstractFrontend + + +@attrs.define(slots=True, auto_attribs=True, init=False) +class RootContext: + pidx: int + proxy_frontend: "AbstractFrontend" + worker_id: UUID + local_config: ServerConfig + cors_options: dict[str, aiohttp_cors.ResourceOptions] + + +CleanupContext: TypeAlias = Callable[["RootContext"], AsyncContextManager[None]] diff --git a/src/ai/backend/wsproxy/exceptions.py b/src/ai/backend/wsproxy/exceptions.py new file mode 100644 index 00000000000..fb1298f8ab3 --- /dev/null +++ b/src/ai/backend/wsproxy/exceptions.py @@ -0,0 +1,209 @@ +""" +This module defines a series of Backend.AI-specific errors based on HTTP Error +classes from aiohttp. +Raising a BackendError is automatically mapped to a corresponding HTTP error +response with RFC7807-style JSON-encoded description in its response body. + +In the client side, you should use "type" field in the body to distinguish +canonical error types beacuse "title" field may change due to localization and +future UX improvements. +""" + +from __future__ import annotations + +import json +from typing import Any, Optional + +from aiohttp import web + +from ai.backend.common.plugin.hook import HookResult + + +class BackendError(web.HTTPError): + """ + An RFC-7807 error class as a drop-in replacement of the original + aiohttp.web.HTTPError subclasses. + """ + + error_type: str = "https://api.backend.ai/probs/general-error" + error_title: str = "General Backend API Error." + + content_type: str + extra_msg: Optional[str] + + body_dict: dict[str, Any] + + def __init__(self, extra_msg: str | None = None, extra_data: Any = None, **kwargs): + super().__init__(**kwargs) + self.args = (self.status_code, self.reason, self.error_type) + self.empty_body = False + self.content_type = "application/problem+json" + self.extra_msg = extra_msg + self.extra_data = extra_data + body = { + "type": self.error_type, + "title": self.error_title, + } + if extra_msg is not None: + body["msg"] = extra_msg + if extra_data is not None: + body["data"] = extra_data + self.body_dict = body + self.body = json.dumps(body).encode() + + def __str__(self): + lines = [] + if self.extra_msg: + lines.append(f"{self.error_title} ({self.extra_msg})") + else: + lines.append(self.error_title) + if self.extra_data: + lines.append(" -> extra_data: " + repr(self.extra_data)) + return "\n".join(lines) + + def __repr__(self): + lines = [] + if self.extra_msg: + lines.append( + f"<{type(self).__name__}({self.status}): {self.error_title} ({self.extra_msg})>" + ) + else: + lines.append(f"<{type(self).__name__}({self.status}): {self.error_title}>") + if self.extra_data: + lines.append(" -> extra_data: " + repr(self.extra_data)) + return "\n".join(lines) + + def __reduce__(self): + return ( + type(self), + (), # empty the constructor args to make unpickler to use + # only the exact current state in __dict__ + self.__dict__, + ) + + +class URLNotFound(BackendError, web.HTTPNotFound): + error_type = "https://api.backend.ai/probs/url-not-found" + error_title = "Unknown URL path." + + +class ObjectNotFound(BackendError, web.HTTPNotFound): + error_type = "https://api.backend.ai/probs/object-not-found" + object_name = "object" + + def __init__( + self, + *, + extra_msg: str | None = None, + extra_data: Any = None, + object_name: str | None = None, + **kwargs, + ) -> None: + if object_name: + self.object_name = object_name + self.error_title = f"E00002: No such {self.object_name}." 
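+        # Set error_title before BackendError.__init__() so the customized title lands in the RFC 7807 body.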
+ super().__init__(extra_msg, extra_data, **kwargs) + + +class GenericBadRequest(BackendError, web.HTTPBadRequest): + error_type = "https://api.backend.ai/probs/generic-bad-request" + error_title = "Bad request." + + +class RejectedByHook(BackendError, web.HTTPBadRequest): + error_type = "https://api.backend.ai/probs/rejected-by-hook" + error_title = "Operation rejected by a hook plugin." + + @classmethod + def from_hook_result(cls, result: HookResult) -> RejectedByHook: + return cls( + extra_msg=result.reason, + extra_data={ + "plugins": result.src_plugin, + }, + ) + + +class InvalidCredentials(BackendError, web.HTTPBadRequest): + error_type = "https://api.backend.ai/probs/invalid-credentials" + error_title = "Authentication credentials not valid." + + +class GenericForbidden(BackendError, web.HTTPForbidden): + error_type = "https://api.backend.ai/probs/generic-forbidden" + error_title = "Forbidden operation." + + +class InsufficientPrivilege(BackendError, web.HTTPForbidden): + error_type = "https://api.backend.ai/probs/insufficient-privilege" + error_title = "Insufficient privilege." + + +class MethodNotAllowed(BackendError, web.HTTPMethodNotAllowed): + error_type = "https://api.backend.ai/probs/method-not-allowed" + error_title = "HTTP Method Not Allowed." + + +class InternalServerError(BackendError, web.HTTPInternalServerError): + error_type = "https://api.backend.ai/probs/internal-server-error" + error_title = "Internal server error." + + +class ServerMisconfiguredError(BackendError, web.HTTPInternalServerError): + error_type = "https://api.backend.ai/probs/server-misconfigured" + error_title = "E00001: Service misconfigured." + + +class ServiceUnavailable(BackendError, web.HTTPServiceUnavailable): + error_type = "https://api.backend.ai/probs/service-unavailable" + error_title = "Serivce unavailable." + + +class QueryNotImplemented(BackendError, web.HTTPServiceUnavailable): + error_type = "https://api.backend.ai/probs/not-implemented" + error_title = "This API query is not implemented." + + +class InvalidAuthParameters(BackendError, web.HTTPBadRequest): + error_type = "https://api.backend.ai/probs/invalid-auth-params" + error_title = "Missing or invalid authorization parameters." + + +class AuthorizationFailed(BackendError, web.HTTPUnauthorized): + error_type = "https://api.backend.ai/probs/auth-failed" + error_title = "Credential/signature mismatch." + + +class PasswordExpired(BackendError, web.HTTPUnauthorized): + error_type = "https://api.backend.ai/probs/password-expired" + error_title = "Password has expired." + + +class InvalidAPIParameters(BackendError, web.HTTPBadRequest): + error_type = "https://api.backend.ai/probs/invalid-api-params" + error_title = "Missing or invalid API parameters." + + +class GraphQLError(BackendError, web.HTTPBadRequest): + error_type = "https://api.backend.ai/probs/graphql-error" + error_title = "GraphQL-generated error." + + +class WorkerNotAvailable(BackendError): + error_title = "Worker not available" + + +class PortNotAvailable(BackendError): + error_title = "Designated port already occupied" + + +class UnsupportedProtocol(BackendError): + error_title = "Unsupported protocol" + + +class DatabaseError(BackendError): + error_title = "error while communicating with database" + + +class ContainerConnectionRefused(BackendError): + error_title: str = "Cannot connect to Backend.AI kernel." 
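+
+# Illustrative usage: handlers raise these classes instead of assembling error responses by
+# hand; each instance already carries its RFC 7807 JSON body, e.g.
+#   raise ObjectNotFound(object_name="circuit")
+#   # -> HTTP 404 with {"type": "https://api.backend.ai/probs/object-not-found",
+#   #                   "title": "E00002: No such circuit."}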
diff --git a/src/ai/backend/wsproxy/openapi.py b/src/ai/backend/wsproxy/openapi.py new file mode 100644 index 00000000000..ae4c690e431 --- /dev/null +++ b/src/ai/backend/wsproxy/openapi.py @@ -0,0 +1,178 @@ +import sys +import textwrap +from collections import defaultdict +from typing import Any, Union, get_args, get_origin, get_type_hints + +from aiohttp import web +from aiohttp.web_urldispatcher import AbstractResource, DynamicResource +from pydantic import BaseModel + +from . import __version__ +from .types import PydanticResponse + + +class ParseError(Exception): + pass + + +def get_path_parameters(resource: AbstractResource) -> list[dict]: + params = [] + if isinstance(resource, DynamicResource): + if groupindex := resource._pattern.groupindex: + params = [ + {"name": param, "in": "path", "required": True, "schema": {"type": "string"}} + for param in groupindex.keys() + ] + return params + + +def generate_openapi( + component: str, subapps: list[web.Application], verbose=False +) -> dict[str, Any]: + openapi: dict[str, Any] = { + "openapi": "3.1.0", + "info": { + "title": f"Backend.AI {component} API", + "description": f"Backend.AI {component} REST API specification", + "version": __version__, + "contact": { + "name": "Lablup Inc.", + "url": "https://docs.backend.ai", + "email": "contect@lablup.com", + }, + }, + "components": { + "securitySchemes": { + "X-BackendAI-Token": { + "type": "http", + "scheme": "bearer", + "bearerFormat": "JWT", + }, + }, + "schemas": {}, + }, + "paths": defaultdict(lambda: {}), + } + operation_id_mapping: defaultdict[str, int] = defaultdict(lambda: 0) + for app in subapps: + prefix = app.get("prefix", "root") + for route in app.router.routes(): + resource = route.resource + if not resource: + continue + + if "_root_app" not in app: + path = "/" + ("" if prefix == "root" else prefix) + resource.canonical + else: + path = resource.canonical + method = route.method + + if method == "OPTIONS": + continue + + operation_id = f"{prefix}.{route.handler.__name__}" + if verbose: + sys.stderr.write(f"parsing {operation_id}\n") + sys.stderr.flush() + operation_id_mapping[operation_id] += 1 + if (operation_id_count := operation_id_mapping[operation_id]) > 1: + operation_id += f".{operation_id_count}" + + description = [] + if route.handler.__doc__: + description.append(textwrap.dedent(route.handler.__doc__)) + + route_def = { + "operationId": operation_id, + "tags": [prefix], + "responses": dict(), + } + parameters = [] + parameters.extend(get_path_parameters(resource)) + if hasattr(route.handler, "_backend_attrs"): + preconds = [] + handler_attrs = getattr(route.handler, "_backend_attrs") + if handler_attrs.get("auth_required"): + route_def["security"] = [{"X-BackendAI-Token": []}] + if auth_scope := handler_attrs.get("auth_scope"): + preconds.append( + f"Requires {auth_scope.capitalize()} token present at `X-BackendAI-Token` request header to work." 
+ ) + if preconds: + description.append("\n**Preconditions:**") + for item in preconds: + description.append(f"* {item}") + description.append("") + if request_scheme := handler_attrs.get("request_scheme"): + if issubclass(request_scheme, BaseModel): + schema_name = request_scheme.__name__ + request_schema = request_scheme.model_json_schema( + ref_template="#/components/schemas/{model}" + ) + + if additional_definitions := request_schema.pop("$defs", None): + openapi["components"]["schemas"].update(additional_definitions) + openapi["components"]["schemas"][schema_name] = request_schema + route_def["requestBody"] = { + "deprecated": handler_attrs.get("deprecated", False), + "content": { + "application/json": { + "schema": {"$ref": f"#/components/schemas/{schema_name}"} + } + }, + } + else: + raise RuntimeError( + f"{request_scheme} not considered as a valid request type" + ) + + route_def["parameters"] = parameters + route_def["description"] = "\n".join(description) + type_hints = get_type_hints(route.handler) + + def _parse_schema(model_cls: type[BaseModel]) -> dict: + if not issubclass(model_cls, BaseModel): + raise RuntimeError(f"{model_cls} not considered as a valid response type") + + schema_name = model_cls.__name__ + response_schema = model_cls.model_json_schema( + ref_template="#/components/schemas/{model}" + ) + + if additional_definitions := response_schema.pop("$defs", None): + openapi["components"]["schemas"].update(additional_definitions) + openapi["components"]["schemas"][schema_name] = response_schema + return { + "description": "", + "content": { + "application/json": { + "schema": {"$ref": f"#/components/schemas/{schema_name}"} + } + }, + } + + if (ret_type := type_hints.get("return")) and (ret_type_origin := get_origin(ret_type)): + if ret_type_origin == Union: + response_classes = get_args(ret_type) + responses = dict() + + for cls in response_classes: + if (subclass_origin := get_origin(cls)) and issubclass( + subclass_origin, PydanticResponse + ): + if "200" in responses: + raise RuntimeError( + "Cannot specify multiple response types for a single API handler" + ) + responses["200"] = _parse_schema(get_args(cls)[0]) + elif issubclass(cls, web.HTTPTemporaryRedirect): + responses["301"] = {"Description": "Redirection"} + elif issubclass(cls, web.HTTPPermanentRedirect): + responses["302"] = {"Description": "Redirection"} + + route_def["responses"] = responses + elif issubclass(ret_type_origin, PydanticResponse): + route_def["responses"] = {"200": _parse_schema(get_args(ret_type)[0])} + + openapi["paths"][path][method.lower()] = route_def + return openapi diff --git a/src/ai/backend/wsproxy/proxy/BUILD b/src/ai/backend/wsproxy/proxy/BUILD new file mode 100644 index 00000000000..73574424040 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/BUILD @@ -0,0 +1 @@ +python_sources(name="src") diff --git a/src/ai/backend/wsproxy/proxy/backend/BUILD b/src/ai/backend/wsproxy/proxy/backend/BUILD new file mode 100644 index 00000000000..73574424040 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/backend/BUILD @@ -0,0 +1 @@ +python_sources(name="src") diff --git a/src/ai/backend/wsproxy/proxy/backend/__init__.py b/src/ai/backend/wsproxy/proxy/backend/__init__.py new file mode 100644 index 00000000000..ba191f0babb --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/backend/__init__.py @@ -0,0 +1,7 @@ +__all__ = ( + "HTTPBackend", + "TCPBackend", +) + +from .http import HTTPBackend +from .tcp import TCPBackend diff --git a/src/ai/backend/wsproxy/proxy/backend/abc.py 
b/src/ai/backend/wsproxy/proxy/backend/abc.py new file mode 100644 index 00000000000..b6a7127687f --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/backend/abc.py @@ -0,0 +1,30 @@ +import logging +from abc import ABCMeta +from dataclasses import dataclass +from typing import Any + +import aiohttp +from yarl import URL + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.defs import RootContext +from ai.backend.wsproxy.types import Circuit + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + + +@dataclass +class HttpRequest: + method: str + path: str | URL + headers: dict[str, Any] + body: aiohttp.StreamReader + + +class AbstractBackend(metaclass=ABCMeta): + root_context: RootContext + circuit: Circuit + + def __init__(self, root_context: RootContext, circuit: Circuit) -> None: + self.root_context = root_context + self.circuit = circuit diff --git a/src/ai/backend/wsproxy/proxy/backend/http.py b/src/ai/backend/wsproxy/proxy/backend/http.py new file mode 100644 index 00000000000..0033eef6b38 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/backend/http.py @@ -0,0 +1,230 @@ +import asyncio +import logging +import random +from contextlib import asynccontextmanager +from typing import AsyncIterator + +import aiohttp +from aiohttp import ClientConnectorError, web + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.exceptions import ContainerConnectionRefused, WorkerNotAvailable +from ai.backend.wsproxy.types import RouteInfo + +from .abc import AbstractBackend, HttpRequest + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + +CHUNK_SIZE = 1 * 1024 * 1024 # 1 KiB + + +class HTTPBackend(AbstractBackend): + routes: list[RouteInfo] + + def __init__(self, routes: list[RouteInfo], *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.routes = routes + + @property + def selected_route(self) -> RouteInfo: + if len(self.routes) == 0: + raise WorkerNotAvailable + elif len(self.routes) == 1: + selected_route = self.routes[0] + if selected_route.traffic_ratio == 0: + raise WorkerNotAvailable + else: + routes = [ + r for r in sorted(self.routes, key=lambda r: r.traffic_ratio) if r.traffic_ratio > 0 + ] + ranges: list[float] = [] + ratio_sum = 0.0 + for route in routes: + ratio_sum += route.traffic_ratio + ranges.append(ratio_sum) + rand = random.random() * ranges[-1] + for i in range(len(ranges)): + ceiling = ranges[0] + if (i == 0 and rand < ceiling) or (ranges[i - 1] <= rand and rand < ceiling): + selected_route = routes[i] + break + else: + selected_route = routes[-1] + return selected_route + + def get_x_forwarded_proto(self, request: web.Request) -> str: + return request.headers.get("x-forwarded-proto") or "http" + + def get_x_forwarded_host(self, request: web.Request) -> str | None: + return request.headers.get("x-forwarded-host") or request.headers.get("host") + + @asynccontextmanager + async def request_http( + self, route: RouteInfo, request: HttpRequest + ) -> AsyncIterator[aiohttp.ClientResponse]: + base_url = f"http://{route.kernel_host}:{route.kernel_port}" + headers = dict(request.headers) + + if headers.get("Transfer-Encoding", "").lower() == "chunked": + del headers["Transfer-Encoding"] + async with aiohttp.ClientSession( + base_url=base_url, + auto_decompress=False, + ) as session: + async with session.request( + request.method, + request.path, + headers=headers, + data=request.body, + ) as response: + yield response + + 
@asynccontextmanager + async def connect_websocket( + self, route: RouteInfo, request: web.Request, protocols: list[str] = [] + ) -> AsyncIterator[aiohttp.ClientWebSocketResponse]: + base_url = f"http://{route.kernel_host}:{route.kernel_port}" + async with aiohttp.ClientSession( + base_url=base_url, + auto_decompress=False, + ) as session: + log.debug("connecting to {}{}", base_url, request.rel_url) + async with session.ws_connect(request.rel_url, protocols=protocols) as ws: + log.debug("connected") + yield ws + + async def proxy_http(self, request: web.Request) -> web.StreamResponse: + protocol = self.get_x_forwarded_proto(request) + host = self.get_x_forwarded_host(request) + remote_host, remote_port = ( + request.transport.get_extra_info("peername") if request.transport else None, + None, + ) + headers = { + **request.headers, + "x-forwarded-proto": protocol, + } + if self.circuit.app == "rstudio": + headers["x-rstudio-proto"] = protocol + if host: + headers["forwarded"] = f"host={host};proto={protocol}" + headers["x-forwarded-host"] = host + if self.circuit.app == "rstudio": + headers["x-rstudio-request"] = f"{protocol}://{host}{request.path or ''}" + split = host.split(":") + if len(split) >= 2: + headers["x-forwarded-port"] = split[1] + elif remote_port: + headers["x-forwarded-port"] = remote_port + if remote_host: + headers["x-forwarded-for"] = f"{remote_host[0]}:{remote_host[1]}" + upstream_request = HttpRequest( + request.method, + request.rel_url, + headers, + request.content, + ) + route = self.selected_route + log.debug( + "Proxying {} {} HTTP Request to {}:{}", + request.method, + request.rel_url, + route.kernel_host, + route.kernel_port, + ) + + try: + async with self.request_http(route, upstream_request) as backend_response: + response = web.StreamResponse( + status=backend_response.status, + headers={**backend_response.headers, "Access-Control-Allow-Origin": "*"}, + ) + await response.prepare(request) + async for data in backend_response.content.iter_chunked(CHUNK_SIZE): + await response.write(data) + await response.drain() + + return response + except aiohttp.ClientOSError as e: + raise ContainerConnectionRefused from e + except: + log.exception("") + raise + + async def proxy_ws(self, request: web.Request) -> web.WebSocketResponse: + stop_event = asyncio.Event() + + async def _proxy_task( + left: web.WebSocketResponse | aiohttp.ClientWebSocketResponse, + right: web.WebSocketResponse | aiohttp.ClientWebSocketResponse, + tag="(unknown)", + ) -> None: + try: + async for msg in left: + match msg.type: + case aiohttp.WSMsgType.TEXT: + await right.send_str(msg.data) + case aiohttp.WSMsgType.BINARY: + await right.send_bytes(msg.data) + case aiohttp.WSMsgType.PING: + await right.ping(msg.data) + case aiohttp.WSMsgType.PONG: + await right.pong(msg.data) + case aiohttp.WSMsgType.CLOSE: + log.debug("{}: websocket closed", tag) + await right.close(code=msg.data) + case aiohttp.WSMsgType.ERROR: + log.debug("{}: websocket closed with error", tag) + await right.close() + case _: + log.debug("{}: Unhandled message type {}", tag, msg.type) + except ConnectionResetError: + pass + except Exception: + log.exception("") + raise + finally: + log.debug("setting stop event") + stop_event.set() + + route = self.selected_route + log.debug( + "Proxying {} {} WS Request to {}:{}", + request.method, + request.path or "/", + route.kernel_host, + route.kernel_port, + ) + + if "Sec-WebSocket-Protocol" in request.headers: + protocols = list(request.headers["Sec-WebSocket-Protocol"].split(",")) + else: + 
protocols = [] + + downstream_ws = web.WebSocketResponse(protocols=protocols) + await downstream_ws.prepare(request) + + try: + async with self.connect_websocket(route, request, protocols=protocols) as upstream_ws: + try: + async with asyncio.TaskGroup() as group: + group.create_task( + _proxy_task(upstream_ws, downstream_ws, tag="(up -> down)") + ) + group.create_task( + _proxy_task(downstream_ws, upstream_ws, tag="(down -> up)") + ) + log.debug("created tasks, now waiting until one of two tasks end") + await stop_event.wait() + finally: + log.debug("tasks ended") + if not downstream_ws.closed: + await downstream_ws.close() + if not upstream_ws.closed: + await upstream_ws.close() + log.debug("websocket connection closed") + except ClientConnectorError: + log.debug("upstream connection closed") + if not downstream_ws.closed: + await downstream_ws.close() + return downstream_ws diff --git a/src/ai/backend/wsproxy/proxy/backend/tcp.py b/src/ai/backend/wsproxy/proxy/backend/tcp.py new file mode 100644 index 00000000000..ca92b672c33 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/backend/tcp.py @@ -0,0 +1,108 @@ +import asyncio +import logging +import random +import socket +from typing import Final + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.exceptions import WorkerNotAvailable +from ai.backend.wsproxy.types import RouteInfo + +from .abc import AbstractBackend + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + +MAX_BUFFER_SIZE: Final[int] = 1 * 1024 * 1024 + + +class TCPBackend(AbstractBackend): + routes: list[RouteInfo] + + def __init__(self, routes: list[RouteInfo], *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.routes = routes + + @property + def selected_route(self) -> RouteInfo: + if len(self.routes) == 0: + raise WorkerNotAvailable + elif len(self.routes) == 1: + selected_route = self.routes[0] + if selected_route.traffic_ratio == 0: + raise WorkerNotAvailable + else: + routes = [ + r for r in sorted(self.routes, key=lambda r: r.traffic_ratio) if r.traffic_ratio > 0 + ] + ranges: list[float] = [] + ratio_sum = 0.0 + for route in routes: + ratio_sum += route.traffic_ratio + ranges.append(ratio_sum) + rand = random.random() * ranges[-1] + for i in range(len(ranges)): + ceiling = ranges[0] + if (i == 0 and rand < ceiling) or (ranges[i - 1] <= rand and rand < ceiling): + selected_route = routes[i] + break + else: + selected_route = routes[-1] + return selected_route + + async def bind( + self, down_reader: asyncio.StreamReader, down_writer: asyncio.StreamWriter + ) -> None: + stop_event = asyncio.Event() + + async def _pipe( + reader: asyncio.StreamReader, + writer: asyncio.StreamWriter, + tag="(unknown)", + ) -> None: + try: + while True: + data = await reader.read(n=MAX_BUFFER_SIZE) + if not data: + break + writer.write(data) + await writer.drain() + log.debug("TCPBackend._pipe(t: {}): sent {} bytes", tag, len(data)) + except ConnectionResetError: + log.debug("Conn reset") + pass + except Exception: + log.exception("") + raise + finally: + log.debug("setting stop event") + stop_event.set() + + route = self.selected_route + log.debug( + "Proxying TCP Request to {}:{}", + route.kernel_host, + route.kernel_port, + ) + + try: + sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + # unlike .frontend.tcp this has a chance of being a blocking call since kernel host can be a domain + await 
asyncio.get_running_loop().run_in_executor( + None, sock.connect, (route.kernel_host, route.kernel_port) + ) + + up_reader, up_writer = await asyncio.open_connection(sock=sock) + log.debug( + "Connected to {}:{}", + route.kernel_host, + route.kernel_port, + ) + async with asyncio.TaskGroup() as group: + group.create_task(_pipe(up_reader, down_writer, tag="up->down")) + group.create_task(_pipe(down_reader, up_writer, tag="down->up")) + finally: + log.debug("tasks ended") + down_writer.close() + await down_writer.wait_closed() + log.debug("TCP connection closed") diff --git a/src/ai/backend/wsproxy/proxy/frontend/BUILD b/src/ai/backend/wsproxy/proxy/frontend/BUILD new file mode 100644 index 00000000000..73574424040 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/BUILD @@ -0,0 +1 @@ +python_sources(name="src") diff --git a/src/ai/backend/wsproxy/proxy/frontend/__init__.py b/src/ai/backend/wsproxy/proxy/frontend/__init__.py new file mode 100644 index 00000000000..f64f5c54e91 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/__init__.py @@ -0,0 +1,7 @@ +__all__ = [ + "HTTPPortFrontend", + "TCPFrontend", +] + +from .http.port import PortFrontend as HTTPPortFrontend +from .tcp import TCPFrontend diff --git a/src/ai/backend/wsproxy/proxy/frontend/abc.py b/src/ai/backend/wsproxy/proxy/frontend/abc.py new file mode 100644 index 00000000000..31a43889ded --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/abc.py @@ -0,0 +1,95 @@ +import logging +from abc import ABCMeta, abstractmethod +from typing import Generic, TypeVar +from uuid import UUID + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.defs import RootContext +from ai.backend.wsproxy.exceptions import ObjectNotFound +from ai.backend.wsproxy.types import ( + Circuit, + RouteInfo, + TCircuitKey, +) + +from ..backend.abc import AbstractBackend + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + +TBackend = TypeVar("TBackend", bound=AbstractBackend) + + +class AbstractFrontend(Generic[TBackend, TCircuitKey], metaclass=ABCMeta): + root_context: RootContext + circuits: dict[TCircuitKey, Circuit] + backends: dict[TCircuitKey, TBackend] + + def __init__(self, root_context: RootContext) -> None: + self.root_context = root_context + self.circuits = {} + self.backends = {} + + def get_circuit_by_endpoint_id(self, endpoint_id: UUID) -> Circuit: + for _, circuit in self.circuits.items(): + if circuit.endpoint_id == endpoint_id: + return circuit + raise ObjectNotFound(object_name="Circuit") + + def get_circuit_by_id(self, id: UUID) -> Circuit: + for _, circuit in self.circuits.items(): + if circuit.id == id: + return circuit + raise ObjectNotFound(object_name="Circuit") + + async def register_circuit(self, circuit: Circuit, routes: list[RouteInfo]) -> None: + key = self.get_circuit_key(circuit) + self.circuits[key] = circuit + self.backends[key] = await self.initialize_backend(circuit, routes) + log.info( + "circuit {} (app:{}, mode: {}) registered", circuit.id, circuit.app, circuit.app_mode + ) + + async def update_circuit_route_info( + self, circuit: Circuit, new_routes: list[RouteInfo] + ) -> None: + key = self.get_circuit_key(circuit) + assert key in self.circuits, "Slot not active" + await self.update_backend(self.backends[key], new_routes) + + async def break_circuit(self, circuit: Circuit) -> None: + key = self.get_circuit_key(circuit) + assert key in self.circuits, "Slot not active" + await self.terminate_backend(self.backends[key]) + del 
self.backends[key] + del self.circuits[key] + log.info( + "circuit {} (app:{}, mode: {}) unregistered", circuit.id, circuit.app, circuit.app_mode + ) + + async def terminate_all_circuits(self) -> None: + for circuit in list(self.circuits.values()): + await self.break_circuit(circuit) + + @abstractmethod + async def start(self) -> None: + raise NotImplementedError + + @abstractmethod + async def stop(self) -> None: + raise NotImplementedError + + @abstractmethod + async def initialize_backend(self, circuit: Circuit, routes: list[RouteInfo]) -> TBackend: + raise NotImplementedError + + @abstractmethod + async def update_backend(self, backend: TBackend, routes: list[RouteInfo]) -> TBackend: + raise NotImplementedError + + @abstractmethod + async def terminate_backend(self, backend: TBackend) -> None: + raise NotImplementedError + + @abstractmethod + def get_circuit_key(self, circuit: Circuit) -> TCircuitKey: + raise NotImplementedError diff --git a/src/ai/backend/wsproxy/proxy/frontend/http/BUILD b/src/ai/backend/wsproxy/proxy/frontend/http/BUILD new file mode 100644 index 00000000000..73574424040 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/http/BUILD @@ -0,0 +1 @@ +python_sources(name="src") diff --git a/src/ai/backend/wsproxy/proxy/frontend/http/abc.py b/src/ai/backend/wsproxy/proxy/frontend/http/abc.py new file mode 100644 index 00000000000..0fe64797de0 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/http/abc.py @@ -0,0 +1,121 @@ +import logging +from typing import Generic + +import aiohttp_jinja2 +import jwt +from aiohttp import web + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.defs import RootContext +from ai.backend.wsproxy.exceptions import BackendError, InvalidCredentials +from ai.backend.wsproxy.proxy.backend.http import HTTPBackend +from ai.backend.wsproxy.types import ( + PERMIT_COOKIE_NAME, + Circuit, + InferenceAppInfo, + InteractiveAppInfo, + RouteInfo, + TCircuitKey, + WebRequestHandler, +) +from ai.backend.wsproxy.utils import ensure_json_serializable, is_permit_valid, mime_match + +from ..abc import AbstractFrontend + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + + +class AbstractHTTPFrontend(Generic[TCircuitKey], AbstractFrontend[HTTPBackend, TCircuitKey]): + root_context: RootContext + + def ensure_credential(self, request: web.Request, circuit: Circuit) -> None: + if circuit.open_to_public: + return + + match circuit.app_info: + case InteractiveAppInfo(): + permit_hash = request.cookies.get(PERMIT_COOKIE_NAME) + if not permit_hash: + raise InvalidCredentials("E20004: Authorization cookie not provided") + if not is_permit_valid( + self.root_context.local_config.wsproxy.permit_hash_key, + circuit.app_info.user_id, + permit_hash, + ): + raise InvalidCredentials("E20005: Invalid authorization cookie") + case InferenceAppInfo(): + auth_header = request.headers.get("Authorization") + if not auth_header: + raise InvalidCredentials("E20006: Authorization header not provided") + auth_type, auth_key = auth_header.split(" ", maxsplit=2) + if auth_type == "BackendAI": + token = auth_key + else: + raise InvalidCredentials( + f"E20007: Unsupported authorization method {auth_type}" + ) + + try: + decoded = jwt.decode( + token, + key=self.root_context.local_config.wsproxy.jwt_encrypt_key, + algorithms=["HS256"], + ) + except jwt.PyJWTError as e: + raise InvalidCredentials from e + + if decoded.get("id") != circuit.id: + raise InvalidCredentials("E20008: Authorization token mismatch") 
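+                # The expected bearer token is an HS256 JWT signed with wsproxy.jwt_encrypt_key whose "id" claim equals this circuit's ID.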
+ + async def initialize_backend(self, circuit: Circuit, routes: list[RouteInfo]) -> HTTPBackend: + return HTTPBackend(routes, self.root_context, circuit) + + async def update_backend(self, backend: HTTPBackend, routes: list[RouteInfo]) -> HTTPBackend: + backend.routes = routes + return backend + + async def terminate_backend(self, backend: HTTPBackend) -> None: + return + + async def proxy(self, request: web.Request) -> web.StreamResponse | web.WebSocketResponse: + backend: HTTPBackend = request["backend"] + + if ( + request.headers.get("connection", "").lower() == "upgrade" + and request.headers.get("upgrade", "").lower() == "websocket" + ): + return await backend.proxy_ws(request) + else: + return await backend.proxy_http(request) + + @web.middleware + async def exception_middleware( + self, request: web.Request, handler: WebRequestHandler + ) -> web.StreamResponse: + try: + resp = await handler(request) + except BackendError as ex: + if ex.status_code == 500: + log.exception("Internal server error raised inside handlers") + if mime_match( + request.headers.get("accept", "text/html"), "application/json", strict=True + ): + return web.json_response( + ensure_json_serializable(ex.body_dict), + status=ex.status_code, + ) + else: + return aiohttp_jinja2.render_template( + "error.jinja2", + request, + ex.body_dict, + ) + return resp + + @web.middleware + async def cors_middleware( + self, request: web.Request, handler: WebRequestHandler + ) -> web.StreamResponse: + resp = await handler(request) + resp.headers["Access-Control-Allow-Origin"] = "*" + return resp diff --git a/src/ai/backend/wsproxy/proxy/frontend/http/port.py b/src/ai/backend/wsproxy/proxy/frontend/http/port.py new file mode 100644 index 00000000000..266b1c0db2a --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/http/port.py @@ -0,0 +1,81 @@ +import importlib.resources +import logging + +import aiohttp_jinja2 +import jinja2 +from aiohttp import web +from aiohttp.typedefs import Handler + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.exceptions import GenericBadRequest +from ai.backend.wsproxy.types import Circuit + +from .abc import AbstractHTTPFrontend + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + + +class PortFrontend(AbstractHTTPFrontend[int]): + sites: list[web.TCPSite] + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.sites = [] + + async def start(self) -> None: + config = self.root_context.local_config.wsproxy + port_start, port_end = config.bind_proxy_port_range + for port in range(port_start, port_end + 1): + app = web.Application() + app["port"] = port + app.middlewares.extend([ + self.cors_middleware, + self.exception_middleware, + self._ensure_slot, + ]) + app.router.add_route("*", "/{path:.*$}", self.proxy) + + with importlib.resources.as_file(importlib.resources.files("ai.backend.wsproxy")) as f: + template_path = f / "templates" + aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(template_path)) + + runner = web.AppRunner(app, keepalive_timeout=30.0) + await runner.setup() + service_host = config.bind_host + site = web.TCPSite( + runner, + service_host, + port, + backlog=1024, + reuse_port=True, + ) + await site.start() + self.sites.append(site) + log.info( + "accepting proxy requests from {}:{}~{}", + config.bind_host, + port_start, + port_end, + ) + + async def stop(self) -> None: + for site in self.sites: + await site.stop() + + @web.middleware + async def 
_ensure_slot(self, request: web.Request, handler: Handler) -> web.StreamResponse: + port: int = request.app["port"] + circuit = self.circuits[port] + if not circuit: + raise GenericBadRequest(f"Unregistered slot {port}") # noqa: F821 + + self.ensure_credential(request, circuit) + circuit = self.circuits[port] + backend = self.backends[port] + request["circuit"] = circuit + request["backend"] = backend + return await handler(request) + + def get_circuit_key(self, circuit: Circuit) -> int: + return circuit.port diff --git a/src/ai/backend/wsproxy/proxy/frontend/tcp.py b/src/ai/backend/wsproxy/proxy/frontend/tcp.py new file mode 100644 index 00000000000..f77a41a4314 --- /dev/null +++ b/src/ai/backend/wsproxy/proxy/frontend/tcp.py @@ -0,0 +1,103 @@ +import asyncio +import functools +import logging +import socket + +from aiohttp import web + +from ai.backend.common.logging import BraceStyleAdapter +from ai.backend.wsproxy.defs import RootContext +from ai.backend.wsproxy.proxy.backend import TCPBackend +from ai.backend.wsproxy.types import ( + Circuit, + RouteInfo, +) + +from .abc import AbstractFrontend + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + + +class TCPFrontend(AbstractFrontend[TCPBackend, int]): + servers: list[asyncio.Server] + server_tasks: list[asyncio.Task] + + root_context: RootContext + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.servers = [] + self.server_tasks = [] + + async def start(self) -> None: + config = self.root_context.local_config.wsproxy + port_start, port_end = config.bind_proxy_port_range + for port in range(port_start, port_end + 1): + service_host = config.bind_host + sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + # sock.bind() can be a blocking call only if + # we're trying to bind to a UNIX domain file or host is not an IP address + # so we don't have to wrap bind() call by run_in_executor() + sock.bind((service_host, port)) + server = await asyncio.start_server( + functools.partial(self.pipe, port), + sock=sock, + ) + self.servers.append(server) + self.server_tasks.append(asyncio.create_task(self._listen_task(port, server))) + log.info( + "accepting proxy requests from {}:{}~{}", + config.bind_host, + port_start, + port_end, + ) + + async def _listen_task(self, circuit_key: int, server: asyncio.Server) -> None: + try: + async with server: + await server.serve_forever() + except Exception: + log.exception("TCPFrontend._listen_task(c: {}): exception:", circuit_key) + raise + + async def stop(self) -> None: + for task in self.server_tasks: + task.cancel() + await task + for server in self.servers: + server.close() + await server.wait_closed() + + def ensure_credential(self, request: web.Request, circuit: Circuit) -> None: + # TCP does not support authentication + return + + async def initialize_backend(self, circuit: Circuit, routes: list[RouteInfo]) -> TCPBackend: + return TCPBackend(routes, self.root_context, circuit) + + async def update_backend(self, backend: TCPBackend, routes: list[RouteInfo]) -> TCPBackend: + backend.routes = routes + return backend + + async def terminate_backend(self, backend: TCPBackend) -> None: + return + + async def pipe( + self, circuit_key: int, reader: asyncio.StreamReader, writer: asyncio.StreamWriter + ) -> None: + backend: TCPBackend | None = self.backends.get(circuit_key) + if not backend: + writer.close() + await writer.wait_closed() + return + + 
try: + await backend.bind(reader, writer) + except Exception: + log.exception("TCPFrontend.pipe(k: {}):", circuit_key) + raise + + def get_circuit_key(self, circuit: Circuit) -> int: + return circuit.port diff --git a/src/ai/backend/wsproxy/registry.py b/src/ai/backend/wsproxy/registry.py new file mode 100644 index 00000000000..2964f0eb88c --- /dev/null +++ b/src/ai/backend/wsproxy/registry.py @@ -0,0 +1,60 @@ +import datetime +import uuid +from typing import Any + +from .defs import RootContext +from .exceptions import ServiceUnavailable +from .types import ( + AppMode, + Circuit, + EndpointConfig, + FrontendMode, + ProxyProtocol, + RouteInfo, + SessionConfig, +) + + +async def add_circuit( + root_ctx: RootContext, + session_info: SessionConfig, + endpoint_info: EndpointConfig | None, + app: str, + protocol: ProxyProtocol, + mode: AppMode, + routes: list[RouteInfo], + *, + envs: dict[str, Any] = {}, + args: str | None = None, + open_to_public=False, + allowed_client_ips: str | None = None, +) -> Circuit: + port_range_start, port_range_end = root_ctx.local_config.wsproxy.bind_proxy_port_range + for port in range(port_range_start, port_range_end + 1): + if port not in root_ctx.proxy_frontend.circuits: + break + else: + raise ServiceUnavailable("Port pool exhausted") + + circuit = Circuit( + id=uuid.uuid4(), + app=app, + protocol=protocol, + worker=uuid.UUID("00000000-0000-0000-0000-000000000000"), + app_mode=mode, + frontend_mode=FrontendMode.PORT, + envs=envs, + arguments=args, + port=port, + user_id=session_info.user_uuid, + access_key=session_info.access_key, + endpoint_id=(endpoint_info.id if endpoint_info else None), + route_info=routes, + session_ids=[r.session_id for r in routes], + created_at=datetime.datetime.now(), + updated_at=datetime.datetime.now(), + open_to_public=open_to_public, + allowed_client_ips=allowed_client_ips, + ) + await root_ctx.proxy_frontend.register_circuit(circuit, routes) + return circuit diff --git a/src/ai/backend/wsproxy/server.py b/src/ai/backend/wsproxy/server.py new file mode 100644 index 00000000000..49d720a733f --- /dev/null +++ b/src/ai/backend/wsproxy/server.py @@ -0,0 +1,457 @@ +import asyncio +import functools +import grp +import importlib +import importlib.resources +import logging +import os +import pwd +import sys +import traceback +import uuid +from contextlib import asynccontextmanager as actxmgr +from logging import LoggerAdapter +from pathlib import Path +from typing import Any, AsyncIterator, Final, Iterable, Mapping, Sequence, cast + +import aiohttp_cors +import aiohttp_jinja2 +import aiomonitor +import aiotools +import click +import jinja2 +from aiohttp import web +from setproctitle import setproctitle + +from ai.backend.common.logging import BraceStyleAdapter, Logger +from ai.backend.common.types import LogSeverity +from ai.backend.common.utils import env_info +from ai.backend.wsproxy.exceptions import ( + BackendError, + GenericBadRequest, + InternalServerError, + MethodNotAllowed, + URLNotFound, +) +from ai.backend.wsproxy.types import ( + AppCreator, + ProxyProtocol, + WebMiddleware, + WebRequestHandler, +) + +from . 
import __version__ +from .config import ServerConfig +from .config import load as load_config +from .defs import CleanupContext, RootContext +from .proxy.frontend import ( + HTTPPortFrontend, + TCPFrontend, +) +from .utils import ( + config_key_to_kebab_case, + ensure_json_serializable, + mime_match, +) + +log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined] + +REDIS_APPPROXY_DB: Final[int] = 10 # FIXME: move to ai.backend.common.defs +EVENT_DISPATCHER_CONSUMER_GROUP: Final[str] = "appwsproxy" + +global_subapp_pkgs: Final[list[str]] = [ + ".circuit", + ".conf", + ".endpoint", + ".proxy", + ".setup", +] + + +@web.middleware +async def request_context_aware_middleware( + request: web.Request, handler: WebRequestHandler +) -> web.StreamResponse: + request_id = request.headers.get("X-BackendAI-RequestID", str(uuid.uuid4())) + request["request_id"] = request_id + request["log"] = BraceStyleAdapter(logging.getLogger(f"{__spec__.name} - #{request_id}")) # type: ignore[name-defined] + resp = await handler(request) + return resp + + +@web.middleware +async def api_middleware(request: web.Request, handler: WebRequestHandler) -> web.StreamResponse: + _handler = handler + method_override = request.headers.get("X-Method-Override", None) + if method_override: + request = request.clone(method=method_override) + new_match_info = await request.app.router.resolve(request) + if new_match_info is None: + raise InternalServerError("No matching method handler found") + _handler = new_match_info.handler + request._match_info = new_match_info # type: ignore # this is a hack + ex = request.match_info.http_exception + if ex is not None: + # handled by exception_middleware + raise ex + request_id = request.headers.get("X-BackendAI-RequestID", str(uuid.uuid4())) + request["request_id"] = request_id + request["log"] = BraceStyleAdapter(logging.getLogger(f"{__spec__.name} - #{request_id}")) # type: ignore[name-defined] + resp = await _handler(request) + return resp + + +@web.middleware +async def exception_middleware( + request: web.Request, handler: WebRequestHandler +) -> web.StreamResponse: + root_ctx: RootContext = request.app["_root.context"] + log: LoggerAdapter = request["log"] + + try: + resp = await handler(request) + except BackendError as ex: + if ex.status_code == 500: + log.exception("Internal server error raised inside handlers") + if mime_match(request.headers.get("accept", "text/html"), "application/json", strict=True): + return web.json_response( + ensure_json_serializable(ex.body_dict), + status=ex.status_code, + ) + else: + return aiohttp_jinja2.render_template( + "error.jinja2", + request, + ex.body_dict, + ) + except web.HTTPException as ex: + if ex.status_code == 404: + raise URLNotFound(extra_data=request.path) + if ex.status_code == 405: + concrete_ex = cast(web.HTTPMethodNotAllowed, ex) + raise MethodNotAllowed( + method=concrete_ex.method, allowed_methods=concrete_ex.allowed_methods + ) + log.warning("Bad request: {0!r}", ex) + raise GenericBadRequest + except asyncio.CancelledError as e: + # The server is closing or the client has disconnected in the middle of + # request. Atomic requests are still executed to their ends. 
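+        # Re-raise after the debug log so aiohttp aborts the response instead of treating cancellation as a handler error.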
+ log.debug("Request cancelled ({0} {1})", request.method, request.rel_url) + raise e + except Exception as e: + log.exception("Uncaught exception in HTTP request handlers {0!r}", e) + if root_ctx.local_config.debug.enabled: + raise InternalServerError(traceback.format_exc()) + else: + raise InternalServerError() + else: + return resp + + +@actxmgr +async def proxy_frontend_ctx(root_ctx: RootContext) -> AsyncIterator[None]: + match root_ctx.local_config.wsproxy.protocol: + case ProxyProtocol.HTTP: + root_ctx.proxy_frontend = HTTPPortFrontend(root_ctx) + case ProxyProtocol.TCP: + root_ctx.proxy_frontend = TCPFrontend(root_ctx) + case _: + log.error("Unsupported protocol {}", root_ctx.local_config.wsproxy.protocol) + await root_ctx.proxy_frontend.start() + log.debug("started proxy protocol {}", root_ctx.proxy_frontend.__class__.__name__) + yield + await root_ctx.proxy_frontend.terminate_all_circuits() + await root_ctx.proxy_frontend.stop() + + +async def hello(request: web.Request) -> web.Response: + """ + Returns the API version number. + """ + return web.json_response({ + "wsproxy": __version__, + }) + + +async def status(request: web.Request) -> web.Response: + request["do_not_print_access_log"] = True + return web.json_response({"api_version": "v2"}) + + +async def on_prepare(request: web.Request, response: web.StreamResponse) -> None: + response.headers["Server"] = "BackendAI" + + +def handle_loop_error( + root_ctx: RootContext, + loop: asyncio.AbstractEventLoop, + context: Mapping[str, Any], +) -> None: + exception = context.get("exception") + msg = context.get("message", "(empty message)") + if exception is not None: + if sys.exc_info()[0] is not None: + log.exception("Error inside event loop: {0}", msg) + else: + exc_info = (type(exception), exception, exception.__traceback__) + log.error("Error inside event loop: {0}", msg, exc_info=exc_info) + + +def _init_subapp( + pkg_name: str, + root_app: web.Application, + subapp: web.Application, + global_middlewares: Iterable[WebMiddleware], +) -> None: + subapp.on_response_prepare.append(on_prepare) + + async def _set_root_ctx(subapp: web.Application): + # Allow subapp's access to the root app properties. + # These are the public APIs exposed to plugins as well. + subapp["_root.context"] = root_app["_root.context"] + + # We must copy the public interface prior to all user-defined startup signal handlers. 
+ subapp.on_startup.insert(0, _set_root_ctx) + if "prefix" not in subapp: + subapp["prefix"] = pkg_name.split(".")[-1].replace("_", "-") + prefix = subapp["prefix"] + root_app.add_subapp("/" + prefix, subapp) + root_app.middlewares.extend(global_middlewares) + + +def init_subapp(pkg_name: str, root_app: web.Application, create_subapp: AppCreator) -> None: + root_ctx: RootContext = root_app["_root.context"] + subapp, global_middlewares = create_subapp(root_ctx.cors_options) + _init_subapp(pkg_name, root_app, subapp, global_middlewares) + + +def build_root_app( + pidx: int, + local_config: ServerConfig, + *, + cleanup_contexts: Sequence[CleanupContext] | None = None, + subapp_pkgs: Sequence[str] = [], +) -> web.Application: + app = web.Application( + middlewares=[ + request_context_aware_middleware, + exception_middleware, + api_middleware, + ] + ) + root_ctx = RootContext() + global_exception_handler = functools.partial(handle_loop_error, root_ctx) + loop = asyncio.get_running_loop() + loop.set_exception_handler(global_exception_handler) + app["_root.context"] = root_ctx + root_ctx.local_config = local_config + root_ctx.pidx = pidx + root_ctx.cors_options = { + "*": aiohttp_cors.ResourceOptions( + allow_credentials=False, expose_headers="*", allow_headers="*" + ), + } + app.on_response_prepare.append(on_prepare) + + with importlib.resources.as_file(importlib.resources.files("ai.backend.wsproxy")) as f: + template_path = f / "templates" + aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(template_path)) + + if cleanup_contexts is None: + cleanup_contexts = [ + proxy_frontend_ctx, + ] + + async def _cleanup_context_wrapper(cctx, app: web.Application) -> AsyncIterator[None]: + # aiohttp's cleanup contexts are just async generators, not async context managers. + cctx_instance = cctx(app["_root.context"]) + app["_cctx_instances"].append(cctx_instance) + try: + async with cctx_instance: + yield + except Exception as e: + exc_info = (type(e), e, e.__traceback__) + log.error("Error initializing cleanup_contexts: {0}", cctx.__name__, exc_info=exc_info) + + async def _call_cleanup_context_shutdown_handlers(app: web.Application) -> None: + for cctx in app["_cctx_instances"]: + if hasattr(cctx, "shutdown"): + try: + await cctx.shutdown() + except Exception: + log.exception("error while shutting down a cleanup context") + + app["_cctx_instances"] = [] + app.on_shutdown.append(_call_cleanup_context_shutdown_handlers) + for cleanup_ctx in cleanup_contexts: + app.cleanup_ctx.append( + functools.partial(_cleanup_context_wrapper, cleanup_ctx), + ) + cors = aiohttp_cors.setup(app, defaults=root_ctx.cors_options) + # should be done in create_app() in other modules. + cors.add(app.router.add_route("GET", r"", hello)) + cors.add(app.router.add_route("GET", r"/", hello)) + cors.add(app.router.add_route("GET", "/status", status)) + if subapp_pkgs is None: + subapp_pkgs = [] + for pkg_name in subapp_pkgs: + if pidx == 0: + log.info("Loading module: {0}", pkg_name[1:]) + subapp_mod = importlib.import_module(pkg_name, "ai.backend.wsproxy.api") + init_subapp(pkg_name, app, getattr(subapp_mod, "create_app")) + return app + + +@actxmgr +async def server_main( + loop: asyncio.AbstractEventLoop, + pidx: int, + _args: tuple[ServerConfig, str], +) -> AsyncIterator[None]: + root_app = build_root_app(pidx, _args[0], subapp_pkgs=global_subapp_pkgs) + root_ctx: RootContext = root_app["_root.context"] + + # Start aiomonitor. + # Port is set by config (default=50100 + pidx). 
+ loop.set_debug(root_ctx.local_config.debug.asyncio) + m = aiomonitor.Monitor( + loop, + termui_port=root_ctx.local_config.wsproxy.aiomonitor_termui_port + pidx, + webui_port=root_ctx.local_config.wsproxy.aiomonitor_webui_port + pidx, + console_enabled=False, + hook_task_factory=root_ctx.local_config.debug.enhanced_aiomonitor_task_info, + ) + m.prompt = f"monitor (wsproxy[{pidx}@{os.getpid()}]) >>> " + # Add some useful console_locals for ease of debugging + m.console_locals["root_app"] = root_app + m.console_locals["root_ctx"] = root_ctx + aiomon_started = False + try: + m.start() + aiomon_started = True + except Exception as e: + log.warning("aiomonitor could not start but skipping this error to continue", exc_info=e) + + # Plugin webapps should be loaded before runner.setup(), + # which freezes on_startup event. + try: + runner = web.AppRunner(root_app, keepalive_timeout=30.0) + await runner.setup() + site = web.TCPSite( + runner, + str(root_ctx.local_config.wsproxy.bind_host), + root_ctx.local_config.wsproxy.bind_api_port, + backlog=1024, + reuse_port=True, + ) + await site.start() + + if os.geteuid() == 0: + uid = root_ctx.local_config.wsproxy.user + gid = root_ctx.local_config.wsproxy.group + os.setgroups([ + g.gr_gid for g in grp.getgrall() if pwd.getpwuid(uid).pw_name in g.gr_mem + ]) + os.setgid(gid) + os.setuid(uid) + log.info("changed process uid and gid to {}:{}", uid, gid) + log.info( + "started handling API requests at {}:{}", + root_ctx.local_config.wsproxy.bind_host, + root_ctx.local_config.wsproxy.bind_api_port, + ) + + try: + yield + finally: + log.info("shutting down...") + await runner.cleanup() + finally: + if aiomon_started: + m.close() + + +@actxmgr +async def server_main_logwrapper( + loop: asyncio.AbstractEventLoop, + pidx: int, + _args: tuple[ServerConfig, str], +) -> AsyncIterator[None]: + setproctitle(f"backend.ai: wsproxy worker-{pidx}") + log_endpoint = _args[1] + logging_config = config_key_to_kebab_case(_args[0].logging.model_dump(exclude_none=True)) + logging_config["endpoint"] = log_endpoint + logger = Logger(logging_config, is_master=False, log_endpoint=log_endpoint) + try: + with logger: + async with server_main(loop, pidx, _args): + yield + except Exception: + traceback.print_exc() + + +@click.group(invoke_without_command=True) +@click.option( + "-f", + "--config-path", + "--config", + type=Path, + default=None, + help=("The config file path. (default: ./wsproxy.toml and /etc/backend.ai/wsproxy.toml)"), +) +@click.option( + "--log-level", + type=click.Choice([*LogSeverity.__members__.keys()], case_sensitive=False), + default="INFO", + help="Set the logging verbosity level", +) +@click.pass_context +def main(ctx: click.Context, config_path: Path, log_level: str) -> None: + """ + Start the wsproxy service as a foreground process. 
+ """ + cfg = load_config(config_path, log_level) + + if ctx.invoked_subcommand is None: + cfg.wsproxy.pid_file.touch(exist_ok=True) + cfg.wsproxy.pid_file.write_text(str(os.getpid())) + ipc_base_path = cfg.wsproxy.ipc_base_path + ipc_base_path.mkdir(exist_ok=True, parents=True) + log_sockpath = ipc_base_path / f"worker-logger-{os.getpid()}.sock" + log_endpoint = f"ipc://{log_sockpath}" + logging_config = config_key_to_kebab_case(cfg.logging.model_dump(exclude_none=True)) + logging_config["endpoint"] = log_endpoint + try: + logger = Logger(logging_config, is_master=True, log_endpoint=log_endpoint) + with logger: + setproctitle("backend.ai: wsproxy") + log.info("Backend.AI WSProxy {0}", __version__) + log.info("runtime: {0}", env_info()) + log_config = logging.getLogger("ai.backend.wsproxy.config") + log_config.debug("debug mode enabled.") + if cfg.wsproxy.event_loop == "uvloop": + import uvloop + + uvloop.install() + log.info("Using uvloop as the event loop backend") + try: + aiotools.start_server( + server_main_logwrapper, + num_workers=1, + args=(cfg, log_endpoint), + wait_timeout=5.0, + ) + finally: + log.info("terminated.") + finally: + if cfg.wsproxy.pid_file.is_file(): + # check is_file() to prevent deleting /dev/null! + cfg.wsproxy.pid_file.unlink() + else: + # Click is going to invoke a subcommand. + pass + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/ai/backend/wsproxy/templates/__init__.py b/src/ai/backend/wsproxy/templates/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/ai/backend/wsproxy/templates/error.jinja2 b/src/ai/backend/wsproxy/templates/error.jinja2 new file mode 100644 index 00000000000..2792f88e8ad --- /dev/null +++ b/src/ai/backend/wsproxy/templates/error.jinja2 @@ -0,0 +1,14 @@ + + + + + + + +
+Backend.AI
+{{title}}
+{{description}} {{data}} {{msg}}
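The template above is the HTML fallback used by `exception_middleware` in `server.py`: when the request's `Accept` header does not strictly match `application/json`, the error payload (`ex.body_dict`) is rendered through `error.jinja2` instead of being returned as JSON. A minimal sketch of that decision, using the `mime_match` helper this patch adds in `utils.py`; the plain dict and its keys stand in for `BackendError.body_dict` and are illustrative only:

```python
import json

from ai.backend.wsproxy.utils import mime_match  # helper added by this patch


def render_error(accept_header: str, body: dict) -> tuple[str, str]:
    # Same rule as exception_middleware: answer with JSON only when the client
    # strictly accepts it (strict=True disallows wildcards on the Accept side).
    if mime_match(accept_header, "application/json", strict=True):
        return "application/json", json.dumps(body)
    # Otherwise the aiohttp handler renders error.jinja2 with this dict as its
    # context; here we only fake a tiny HTML body for illustration.
    return "text/html", f"<h1>{body.get('title', '')}</h1><p>{body.get('description', '')}</p>"


# An API client asking for JSON gets JSON; a browser-style Accept header gets HTML.
print(render_error("application/json", {"title": "Not Found", "description": "no such circuit"}))
print(render_error("text/html,application/xhtml+xml;q=0.9", {"title": "Not Found", "description": "no such circuit"}))
```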
+ + diff --git a/src/ai/backend/wsproxy/types.py b/src/ai/backend/wsproxy/types.py new file mode 100644 index 00000000000..36816acd469 --- /dev/null +++ b/src/ai/backend/wsproxy/types.py @@ -0,0 +1,221 @@ +import dataclasses +import enum +import textwrap +from dataclasses import dataclass +from datetime import datetime +from typing import ( + Annotated, + Any, + Awaitable, + Callable, + Final, + Generic, + Iterable, + Mapping, + TypeAlias, + TypeVar, +) +from uuid import UUID + +import aiohttp_cors +from aiohttp import web +from pydantic import AnyUrl, BaseModel, Field + +# FIXME: merge majority of common definitions to ai.backend.common when ready + + +class FrontendMode(str, enum.Enum): + PORT = "port" + + +class ProxyProtocol(str, enum.Enum): + HTTP = "http" + GRPC = "grpc" + HTTP2 = "h2" + TCP = "tcp" + PREOPEN = "preopen" + + +class AppMode(str, enum.Enum): + INTERACTIVE = "interactive" + INFERENCE = "inference" + + +@dataclass +class Slot: + frontend_mode: FrontendMode + in_use: bool + port: int | None + circuit_id: UUID | None + + +class EventLoopType(str, enum.Enum): + UVLOOP = "uvloop" + ASYNCIO = "asyncio" + + +class DigestModType(str, enum.Enum): + SHA1 = "sha1" + SHA224 = "sha224" + SHA256 = "sha256" + SHA384 = "sha384" + SHA512 = "sha512" + + +WebRequestHandler: TypeAlias = Callable[ + [web.Request], + Awaitable[web.StreamResponse], +] +WebMiddleware: TypeAlias = Callable[ + [web.Request, WebRequestHandler], + Awaitable[web.StreamResponse], +] + +CORSOptions: TypeAlias = Mapping[str, aiohttp_cors.ResourceOptions] +AppCreator: TypeAlias = Callable[ + [CORSOptions], + tuple[web.Application, Iterable[WebMiddleware]], +] + + +class RouteInfo(BaseModel): + session_id: UUID + session_name: Annotated[str | None, Field(default=None)] + kernel_host: str + kernel_port: int + protocol: ProxyProtocol + traffic_ratio: Annotated[float, Field(default=1.0)] + + +@dataclass +class PortFrontendInfo: + port: int + + +@dataclass +class InteractiveAppInfo: + user_id: UUID + + +@dataclass +class InferenceAppInfo: + endpoint_id: UUID + + +class Circuit(BaseModel): + id: Annotated[UUID, Field(UUID, description="ID of circuit.")] + + app: Annotated[ + str, + Field( + str, + description="Name of the Backend.AI Kernel app circuit is hosting. Can be a blank string if circuit is referencing an inference app.", + ), + ] + protocol: Annotated[ + ProxyProtocol, Field(ProxyProtocol, description="Protocol of the Backend.AI Kernel app.") + ] + worker: Annotated[UUID, Field(UUID, description="ID of the worker hosting the circuit.")] + + app_mode: Annotated[AppMode, Field(AppMode, description="Application operation mode.")] + frontend_mode: Annotated[ + FrontendMode, Field(FrontendMode, description="Frontend type of worker.") + ] + + envs: dict[str, Any] + arguments: str | None + + open_to_public: Annotated[ + bool, + Field( + bool, + description=textwrap.dedent( + """ + Shows if the circuit is open to public. + For interactive apps, this set as true means users without authorization cookie set will also be able to access application. + For inference apps it means that API will work without authorization token passed. + """ + ), + ), + ] + + allowed_client_ips: Annotated[ + str | None, + Field( + str | None, + description="Comma separated list of CIDRs accepted as traffic source. 
null means the circuit is accessible anywhere.", + ), + ] + + port: Annotated[ + int, + Field(int, description="Occupied worker port."), + ] + + user_id: Annotated[UUID | None, Field(UUID | None, description="Session owner's UUID.")] + access_key: Annotated[str | None, Field(str | None, description="Session owner's access key.")] + endpoint_id: Annotated[ + UUID | None, + Field( + UUID | None, description="Model service's UUID. Only set if `app_mode` is inference." + ), + ] + + route_info: Annotated[ + list[RouteInfo], Field(list[RouteInfo], description="List of kernel access information.") + ] + session_ids: list[UUID] + + created_at: datetime + updated_at: datetime + + @property + def app_info(self) -> InteractiveAppInfo | InferenceAppInfo: + match self.app_mode: + case AppMode.INTERACTIVE: + assert self.user_id + return InteractiveAppInfo(self.user_id) + case AppMode.INFERENCE: + assert self.endpoint_id + return InferenceAppInfo(self.endpoint_id) + case _: + raise KeyError(f"{self.app_mode} is not a valid app mode") + + +class Token(BaseModel): + login_session_token: str | None + kernel_host: str + kernel_port: int + session_id: UUID + user_uuid: UUID + group_id: UUID + access_key: str + domain_name: str + + +class SessionConfig(BaseModel): + id: Annotated[UUID | None, Field(default=None)] + user_uuid: UUID + group_id: UUID + access_key: Annotated[str | None, Field(default=None)] + domain_name: str + + +class EndpointConfig(BaseModel): + id: UUID + existing_url: AnyUrl | None + + +TBaseModel = TypeVar("TBaseModel", bound=BaseModel) + + +@dataclass +class PydanticResponse(Generic[TBaseModel]): + response: TBaseModel + headers: dict[str, Any] = dataclasses.field(default_factory=lambda: {}) + status: int = 200 + + +PERMIT_COOKIE_NAME: Final[str] = "appproxy_permit" + +TCircuitKey = TypeVar("TCircuitKey", int, str) diff --git a/src/ai/backend/wsproxy/utils.py b/src/ai/backend/wsproxy/utils.py new file mode 100644 index 00000000000..be91cef68aa --- /dev/null +++ b/src/ai/backend/wsproxy/utils.py @@ -0,0 +1,78 @@ +import base64 +import enum +import hashlib +import hmac +from datetime import datetime +from pathlib import Path +from typing import Any +from uuid import UUID + +import humps +from pydantic import BaseModel + +from ai.backend.common.types import HostPortPair + + +def ensure_json_serializable(o: Any) -> Any: + match o: + case dict(): + return {ensure_json_serializable(k): ensure_json_serializable(v) for k, v in o.items()} + case list(): + return [ensure_json_serializable(x) for x in o] + case UUID(): + return str(o) + case HostPortPair(): + return {"host": o.host, "port": o.port} + case Path(): + return o.as_posix() + case BaseModel(): + return ensure_json_serializable(o.model_dump()) + case enum.Enum(): + return o.value + case datetime(): + return o.timestamp() + case _: + return o + + +def config_key_to_kebab_case(o: Any) -> Any: + match o: + case dict(): + return {humps.kebabize(k): config_key_to_kebab_case(v) for k, v in o.items()} + case list(): + return [config_key_to_kebab_case(i) for i in o] + case _: + return o + + +def mime_match(base_array: str, compare: str, strict=False) -> bool: + """ + Checks if `base_array` MIME string contains `compare` MIME type. + + :param: base_array: Array of MIME strings to be compared, concatenated with comma (,) delimiter. + :param: compare: MIME string to compare. + :param: strict: If set to True, do not allow wildcard on source MIME type. 
+ """ + for base in base_array.split(","): + _base, _, _ = base.partition(";") + base_left, _, base_right = _base.partition("/") + compare_left, compare_right = compare.split(";")[0].split("/") + if ( + not strict + and ( + (base_left == "*" and base_right == "*") + or (base_left == compare_left and base_right == "*") + ) + ) or (base_left == compare_left and base_right == compare_right): + return True + return False + + +def calculate_permit_hash(hash_key: str, user_id: UUID) -> str: + hash = hmac.new(hash_key.encode(), str(user_id).encode("utf-8"), getattr(hashlib, "sha256")) + return base64.b64encode(hash.hexdigest().encode()).decode() + + +def is_permit_valid(hash_key: str, user_id: UUID, hash: str) -> bool: + valid_hash = calculate_permit_hash(hash_key, user_id) + return valid_hash == hash diff --git a/tests/BUILD b/tests/BUILD new file mode 100644 index 00000000000..2392269d894 --- /dev/null +++ b/tests/BUILD @@ -0,0 +1 @@ +python_test_utils() diff --git a/tests/common/BUILD b/tests/common/BUILD index ee9a92b50a3..79195adb6fb 100644 --- a/tests/common/BUILD +++ b/tests/common/BUILD @@ -3,6 +3,6 @@ python_test_utils() python_tests( name="tests", overrides={ - "test_distributed.py": {"timeout": 30}, + "test_distributed.py": {"timeout": 60}, }, ) diff --git a/tests/common/conftest.py b/tests/common/conftest.py index 8141f9c909c..532c2b44c71 100644 --- a/tests/common/conftest.py +++ b/tests/common/conftest.py @@ -6,7 +6,11 @@ import pytest from ai.backend.common.etcd import AsyncEtcd, ConfigScopes -from ai.backend.testutils.bootstrap import etcd_container, redis_container # noqa: F401 +from ai.backend.testutils.bootstrap import ( # noqa: F401 + etcd_container, + redis_container, + sync_file_lock, +) def pytest_addoption(parser): diff --git a/tests/common/redis_helper/BUILD b/tests/common/redis_helper/BUILD index 9823e97ec46..7e52873809b 100644 --- a/tests/common/redis_helper/BUILD +++ b/tests/common/redis_helper/BUILD @@ -7,7 +7,6 @@ resources( ) python_test_utils( - name="test_utils", dependencies=[ ":redis-cluster-compose", ], diff --git a/tests/common/redis_helper/conftest.py b/tests/common/redis_helper/conftest.py index 457c7c20d98..64e800fc233 100644 --- a/tests/common/redis_helper/conftest.py +++ b/tests/common/redis_helper/conftest.py @@ -2,7 +2,7 @@ import asyncio import socket -from typing import AsyncIterator +from typing import AsyncIterator, cast import pytest @@ -29,9 +29,20 @@ def check_dns_config() -> None: @pytest.fixture -async def redis_cluster(test_ns, test_case_ns) -> AsyncIterator[RedisClusterInfo]: +async def redis_cluster( + request: pytest.FixtureRequest, + test_ns: str, + test_case_ns: str, +) -> AsyncIterator[RedisClusterInfo]: impl = DockerComposeRedisSentinelCluster - cluster = impl(test_ns, test_case_ns, password="develove", service_name="mymaster") + verbosity = cast(int, request.config.getoption("--verbose")) + cluster = impl( + test_ns, + test_case_ns, + password="develove", + service_name="mymaster", + verbose=verbosity > 0, + ) async with cluster.make_cluster() as info: async with asyncio.TaskGroup() as tg: for host, port in info.node_addrs: diff --git a/tests/common/redis_helper/docker.py b/tests/common/redis_helper/docker.py index 079dcb857ae..92e288c7563 100644 --- a/tests/common/redis_helper/docker.py +++ b/tests/common/redis_helper/docker.py @@ -9,11 +9,11 @@ from typing import AsyncIterator, Tuple import aiohttp -import async_timeout import pytest from packaging.version import Version from packaging.version import parse as parse_version +from 
ai.backend.testutils.bootstrap import get_next_tcp_port from ai.backend.testutils.pants import get_parallel_slot from .types import AbstractRedisNode, AbstractRedisSentinelCluster, RedisClusterInfo @@ -35,10 +35,22 @@ async def check_if_port_is_clear(host, port): class DockerRedisNode(AbstractRedisNode): - def __init__(self, node_type: str, port: int, container_id: str) -> None: + def __init__( + self, + node_type: str, + port: int, + container_id: str, + *, + verbose: bool = False, + ) -> None: self.node_type = node_type self.port = port self.container_id = container_id + self.verbose = verbose + self._cmd_opts = {} + if not self.verbose: + self._cmd_opts["stdout"] = asyncio.subprocess.DEVNULL + self._cmd_opts["stderr"] = asyncio.subprocess.DEVNULL @property def addr(self) -> Tuple[str, int]: @@ -50,50 +62,30 @@ def __str__(self) -> str: async def pause(self) -> None: assert self.container_id is not None print(f"Docker container {self.container_id[:12]} is being paused...") - p = await simple_run_cmd( - ["docker", "pause", self.container_id], - # stdout=asyncio.subprocess.DEVNULL, - # stderr=asyncio.subprocess.DEVNULL, - ) + p = await simple_run_cmd(["docker", "pause", self.container_id], **self._cmd_opts) await p.wait() print(f"Docker container {self.container_id[:12]} is paused") async def unpause(self) -> None: assert self.container_id is not None - p = await simple_run_cmd( - ["docker", "unpause", self.container_id], - # stdout=asyncio.subprocess.DEVNULL, - # stderr=asyncio.subprocess.DEVNULL, - ) + p = await simple_run_cmd(["docker", "unpause", self.container_id], **self._cmd_opts) await p.wait() print(f"Docker container {self.container_id[:12]} is unpaused") async def stop(self, force_kill: bool = False) -> None: assert self.container_id is not None if force_kill: - p = await simple_run_cmd( - ["docker", "kill", self.container_id], - # stdout=asyncio.subprocess.DEVNULL, - # stderr=asyncio.subprocess.DEVNULL, - ) + p = await simple_run_cmd(["docker", "kill", self.container_id], **self._cmd_opts) await p.wait() print(f"Docker container {self.container_id[:12]} is killed") else: - p = await simple_run_cmd( - ["docker", "stop", self.container_id], - # stdout=asyncio.subprocess.DEVNULL, - # stderr=asyncio.subprocess.DEVNULL, - ) + p = await simple_run_cmd(["docker", "stop", self.container_id], **self._cmd_opts) await p.wait() print(f"Docker container {self.container_id[:12]} is terminated") async def start(self) -> None: assert self.container_id is not None - p = await simple_run_cmd( - ["docker", "start", self.container_id], - # stdout=asyncio.subprocess.DEVNULL, - # stderr=asyncio.subprocess.DEVNULL, - ) + p = await simple_run_cmd(["docker", "start", self.container_id], **self._cmd_opts) await p.wait() print(f"Docker container {self.container_id[:12]} started") @@ -156,14 +148,14 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: compose_cfg_dir = ( Path.home() / ".cache" / "bai" / "testing" / f"bai-redis-test-{get_parallel_slot()}" ) - base_port = 9200 + get_parallel_slot() * 8 + allocated_ports = get_next_tcp_port(6) ports = { - "REDIS_MASTER_PORT": base_port, - "REDIS_SLAVE1_PORT": base_port + 1, - "REDIS_SLAVE2_PORT": base_port + 2, - "REDIS_SENTINEL1_PORT": base_port + 3, - "REDIS_SENTINEL2_PORT": base_port + 4, - "REDIS_SENTINEL3_PORT": base_port + 5, + "REDIS_MASTER_PORT": allocated_ports[0], + "REDIS_SLAVE1_PORT": allocated_ports[1], + "REDIS_SLAVE2_PORT": allocated_ports[2], + "REDIS_SENTINEL1_PORT": allocated_ports[3], + "REDIS_SENTINEL2_PORT": 
allocated_ports[4], + "REDIS_SENTINEL3_PORT": allocated_ports[5], } async with asyncio.TaskGroup() as tg: for port in ports.values(): @@ -171,25 +163,26 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: os.environ.update({k: str(v) for k, v in ports.items()}) os.environ["COMPOSE_PATH"] = str(compose_cfg_dir) os.environ["DOCKER_USER"] = f"{os.getuid()}:{os.getgid()}" + os.environ["NETWORK_NAME"] = f"testnet-{get_parallel_slot()}-{self.test_case_ns}" if compose_cfg_dir.exists(): shutil.rmtree(compose_cfg_dir) compose_cfg_dir.mkdir(parents=True) for file in template_cfg_files: shutil.copy(template_cfg_dir / file, compose_cfg_dir) - compose_tpl = (compose_cfg_dir / "sentinel.conf").read_text() - compose_tpl = compose_tpl.replace("REDIS_PASSWORD", "develove") - compose_tpl = compose_tpl.replace("REDIS_MASTER_HOST", "node01") - compose_tpl = compose_tpl.replace("REDIS_MASTER_PORT", str(ports["REDIS_MASTER_PORT"])) - sentinel01_cfg = compose_tpl.replace("REDIS_SENTINEL_SELF_HOST", "sentinel01") + sentinel_tpl = (compose_cfg_dir / "sentinel.conf").read_text() + sentinel_tpl = sentinel_tpl.replace("REDIS_PASSWORD", "develove") + sentinel_tpl = sentinel_tpl.replace("REDIS_MASTER_HOST", "node01") + sentinel_tpl = sentinel_tpl.replace("REDIS_MASTER_PORT", str(ports["REDIS_MASTER_PORT"])) + sentinel01_cfg = sentinel_tpl.replace("REDIS_SENTINEL_SELF_HOST", "sentinel01") sentinel01_cfg = sentinel01_cfg.replace( "REDIS_SENTINEL_SELF_PORT", str(ports["REDIS_SENTINEL1_PORT"]) ) - sentinel02_cfg = compose_tpl.replace("REDIS_SENTINEL_SELF_HOST", "sentinel02") + sentinel02_cfg = sentinel_tpl.replace("REDIS_SENTINEL_SELF_HOST", "sentinel02") sentinel02_cfg = sentinel02_cfg.replace( "REDIS_SENTINEL_SELF_PORT", str(ports["REDIS_SENTINEL2_PORT"]) ) - sentinel03_cfg = compose_tpl.replace("REDIS_SENTINEL_SELF_HOST", "sentinel03") + sentinel03_cfg = sentinel_tpl.replace("REDIS_SENTINEL_SELF_HOST", "sentinel03") sentinel03_cfg = sentinel03_cfg.replace( "REDIS_SENTINEL_SELF_PORT", str(ports["REDIS_SENTINEL3_PORT"]) ) @@ -199,7 +192,7 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: compose_file = compose_cfg_dir / "redis-cluster.yml" - async with async_timeout.timeout(30.0): + async with asyncio.timeout(30.0): cmdargs = [ *compose_cmd, "-p", @@ -213,8 +206,6 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: cmdargs, env=os.environ, cwd=compose_cfg_dir, - # stdout=asyncio.subprocess.DEVNULL, - # stderr=asyncio.subprocess.DEVNULL, ) await p.wait() assert p.returncode == 0, "Compose cluster creation has failed." 
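Throughout the `docker.py` hunks above, output from `docker pause/stop/start` and `docker compose` is silenced unless the test run is verbose, by building the subprocess keyword arguments once in `__init__` and splatting them into every `simple_run_cmd()` call. A self-contained sketch of the same pattern with plain asyncio subprocesses; the `QuietRunner` name and the `docker ps` invocation are only examples, not part of the patch:

```python
import asyncio


class QuietRunner:
    """Mirrors DockerRedisNode's verbose handling: hide child output unless asked."""

    def __init__(self, *, verbose: bool = False) -> None:
        self._cmd_opts: dict = {}
        if not verbose:
            # Same choice as the patch: send stdout/stderr to /dev/null by default.
            self._cmd_opts["stdout"] = asyncio.subprocess.DEVNULL
            self._cmd_opts["stderr"] = asyncio.subprocess.DEVNULL

    async def run(self, *args: str) -> int:
        # Equivalent to `await simple_run_cmd([...], **self._cmd_opts)` in the helpers.
        proc = await asyncio.create_subprocess_exec(*args, **self._cmd_opts)
        return await proc.wait()


async def main() -> None:
    runner = QuietRunner(verbose=False)   # quiet by default, like a non-verbose pytest run
    code = await runner.run("docker", "ps")
    print(f"exit code: {code}")


if __name__ == "__main__":
    asyncio.run(main())
```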
@@ -297,12 +288,13 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: cid_mapping[container["Config"]["Labels"]["com.docker.compose.service"]] = ( container["Id"] ) - print(f"--- logs of {container['Id']} ---") - try: - p = await simple_run_cmd(["docker", "logs", container["Id"]]) - finally: - await p.wait() - print("--- end of logs ---") + if self.verbose: + print(f"--- logs of {container['Id']} ---") + try: + p = await simple_run_cmd(["docker", "logs", container["Id"]]) + finally: + await p.wait() + print("--- end of logs ---") print(f"{cids=}") print(f"{cid_mapping=}") @@ -317,16 +309,19 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: "node", ports["REDIS_MASTER_PORT"], cid_mapping["backendai-half-redis-node01"], + verbose=self.verbose, ), DockerRedisNode( "node", ports["REDIS_SLAVE1_PORT"], cid_mapping["backendai-half-redis-node02"], + verbose=self.verbose, ), DockerRedisNode( "node", ports["REDIS_SLAVE2_PORT"], cid_mapping["backendai-half-redis-node03"], + verbose=self.verbose, ), ], sentinel_addrs=[ @@ -339,22 +334,25 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: "sentinel", ports["REDIS_SENTINEL1_PORT"], cid_mapping["backendai-half-redis-sentinel01"], + verbose=self.verbose, ), DockerRedisNode( "sentinel", ports["REDIS_SENTINEL2_PORT"], cid_mapping["backendai-half-redis-sentinel02"], + verbose=self.verbose, ), DockerRedisNode( "sentinel", ports["REDIS_SENTINEL3_PORT"], cid_mapping["backendai-half-redis-sentinel03"], + verbose=self.verbose, ), ], ) finally: await asyncio.sleep(0.2) - async with async_timeout.timeout(30.0): + async with asyncio.timeout(30.0): p = await simple_run_cmd( [ *compose_cmd, @@ -370,7 +368,9 @@ async def make_cluster(self) -> AsyncIterator[RedisClusterInfo]: stderr=asyncio.subprocess.DEVNULL, ) await p.wait() - await asyncio.sleep(0.2) + async with asyncio.TaskGroup() as tg: + for port in ports.values(): + tg.create_task(check_if_port_is_clear("127.0.0.1", port)) async def main(): diff --git a/tests/common/redis_helper/redis-cluster.yml b/tests/common/redis_helper/redis-cluster.yml index a6b7c1f4003..64ab635908a 100644 --- a/tests/common/redis_helper/redis-cluster.yml +++ b/tests/common/redis_helper/redis-cluster.yml @@ -127,3 +127,4 @@ services: networks: testnet: + name: ${NETWORK_NAME:-testnet} diff --git a/tests/common/redis_helper/test_list.py b/tests/common/redis_helper/test_list.py index 919b42a6d1d..9b9968b4dd1 100644 --- a/tests/common/redis_helper/test_list.py +++ b/tests/common/redis_helper/test_list.py @@ -1,10 +1,10 @@ from __future__ import annotations import asyncio +import contextlib import traceback from typing import List, Tuple -import aiotools import pytest from redis.asyncio import Redis from redis.exceptions import ConnectionError as RedisConnectionError @@ -30,7 +30,7 @@ async def test_blist(redis_container: tuple[str, HostPortPair], disruption_metho async def pop(r: RedisConnectionInfo, key: str) -> None: try: - async with aiotools.aclosing( + async with contextlib.aclosing( redis_helper.blpop(r, key), ) as agen: async for raw_msg in agen: @@ -114,7 +114,7 @@ async def test_blist_with_retrying_rpush( async def pop(r: RedisConnectionInfo, key: str) -> None: try: - async with aiotools.aclosing( + async with contextlib.aclosing( redis_helper.blpop(r, key), ) as agen: async for raw_msg in agen: diff --git a/tests/common/redis_helper/types.py b/tests/common/redis_helper/types.py index 614e93cb4bd..08574f3d911 100644 --- a/tests/common/redis_helper/types.py +++ 
b/tests/common/redis_helper/types.py @@ -16,11 +16,20 @@ class RedisClusterInfo: class AbstractRedisSentinelCluster(metaclass=ABCMeta): - def __init__(self, test_ns: str, test_case_ns: str, password: str, service_name: str) -> None: + def __init__( + self, + test_ns: str, + test_case_ns: str, + password: str, + service_name: str, + *, + verbose: bool = False, + ) -> None: self.test_ns = test_ns self.test_case_ns = test_case_ns self.password = password self.service_name = service_name + self.verbose = verbose @contextlib.asynccontextmanager @abstractmethod diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000000..ae8f976d879 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1 @@ +# placeholder for global fixtures diff --git a/tests/manager/BUILD b/tests/manager/BUILD index 9e1d39237ce..c1c79ca1d7e 100644 --- a/tests/manager/BUILD +++ b/tests/manager/BUILD @@ -1,5 +1,4 @@ python_test_utils( - name="test_utils", sources=[ "conftest.py", "model_factory.py", diff --git a/tools/black-requirements.txt b/tools/black-requirements.txt index 9054b955837..c776a9e75f0 100644 --- a/tools/black-requirements.txt +++ b/tools/black-requirements.txt @@ -1 +1 @@ -black~=23.11.0 +black~=24.4 diff --git a/tools/black.lock b/tools/black.lock index 5111c8c23ee..6394e11fb63 100644 --- a/tools/black.lock +++ b/tools/black.lock @@ -6,10 +6,10 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ -// "black~=23.11.0" +// "black~=24.4" // ], // "manylinux": "manylinux2014", // "requirement_constraints": [], @@ -24,6 +24,7 @@ "allow_wheels": true, "build_isolation": true, "constraints": [], + "excluded": [], "locked_resolves": [ { "locked_requirements": [ @@ -31,23 +32,34 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "54caaa703227c6e0c87b76326d0862184729a69b73d3b7305b6288e1d830067e", - "url": "https://files.pythonhosted.org/packages/be/fb/8a670d2a246a351d7662e785d85a636c1c60b5800d175421cdfcb2a59b1d/black-23.11.0-py3-none-any.whl" + "hash": "d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c", + "url": "https://files.pythonhosted.org/packages/0f/89/294c9a6b6c75a08da55e9d05321d0707e9418735e3062b12ef0f54c33474/black-24.4.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "4c68855825ff432d197229846f971bc4d6666ce90492e5b02013bcaca4d9ab05", - "url": "https://files.pythonhosted.org/packages/ef/21/c2d38c7c98a089fd0f7e1a8be16c07f141ed57339b3082737de90db0ca59/black-23.11.0.tar.gz" + "hash": "be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc", + "url": "https://files.pythonhosted.org/packages/25/6d/eb15a1b155f755f43766cc473618c6e1de6555d6a1764965643f486dcf01/black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "04d79b701cfc453886c69900a9a4e5e4f921f5b2a6bd8930c633c7a48f84f217", - "url": "https://media.githubusercontent.com/media/lablup/backend.ai-oven/main/pypi/projects/black/black-23.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d", + "url": "https://files.pythonhosted.org/packages/a2/47/c9997eb470a7f48f7aaddd3d9a828244a2e4199569e38128715c48059ac1/black-24.4.2.tar.gz" + }, + { + "algorithm": "sha256", + "hash": "88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04", + "url": 
"https://files.pythonhosted.org/packages/be/b8/9c152301774fa62a265b035a8ede4d6280827904ea1af8c3be10a28d3187/black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl" + }, + { + "algorithm": "sha256", + "hash": "accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d", + "url": "https://files.pythonhosted.org/packages/f4/75/3a29de3bda4006cc280d833b5d961cf7df3810a21f49e7a63a7e551fb351/black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl" } ], "project_name": "black", "requires_dists": [ - "aiohttp>=3.7.4; extra == \"d\"", + "aiohttp!=3.9.0,>=3.7.4; (sys_platform == \"win32\" and implementation_name == \"pypy\") and extra == \"d\"", + "aiohttp>=3.7.4; (sys_platform != \"win32\" or implementation_name != \"pypy\") and extra == \"d\"", "click>=8.0.0", "colorama>=0.4.3; extra == \"colorama\"", "ipython>=7.8.0; extra == \"jupyter\"", @@ -61,7 +73,7 @@ "uvloop>=0.15.2; extra == \"uvloop\"" ], "requires_python": ">=3.8", - "version": "23.11.0" + "version": "24.4.2" }, { "artifacts": [ @@ -106,19 +118,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5", - "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl" + "hash": "5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", + "url": "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9", - "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz" + "hash": "026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "url": "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz" } ], "project_name": "packaging", "requires_dists": [], - "requires_python": ">=3.7", - "version": "24.0" + "requires_python": ">=3.8", + "version": "24.1" }, { "artifacts": [ @@ -142,13 +154,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", - "url": "https://files.pythonhosted.org/packages/55/72/4898c44ee9ea6f43396fbc23d9bfaf3d06e01b83698bdf2e4c919deceb7c/platformdirs-4.2.0-py3-none-any.whl" + "hash": "2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", + "url": "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768", - "url": "https://files.pythonhosted.org/packages/96/dc/c1d911bf5bb0fdc58cc05010e9f3efe3b67970cef779ba7fbc3183b987a8/platformdirs-4.2.0.tar.gz" + "hash": "38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", + "url": "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz" } ], "project_name": "platformdirs", @@ -156,6 +168,7 @@ "appdirs==1.4.4; extra == \"test\"", "covdefaults>=2.3; extra == \"test\"", "furo>=2023.9.10; extra == \"docs\"", + "mypy>=1.8; extra == \"type\"", "proselint>=0.13; extra == \"docs\"", "pytest-cov>=4.1; extra == \"test\"", "pytest-mock>=3.12; extra == \"test\"", @@ -164,7 +177,7 @@ "sphinx>=7.2.6; extra == \"docs\"" ], "requires_python": ">=3.8", - 
"version": "4.2.0" + "version": "4.2.2" } ], "platform_tag": null @@ -172,15 +185,16 @@ ], "only_builds": [], "only_wheels": [], + "overridden": [], "path_mappings": {}, - "pex_version": "2.3.0", - "pip_version": "24.0", + "pex_version": "2.10.0", + "pip_version": "24.1.2", "prefer_older_binary": false, "requirements": [ - "black~=23.11.0" + "black~=24.4" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/tools/coverage-py.lock b/tools/coverage-py.lock index f2a0cbbe6dd..b925ace8e5c 100644 --- a/tools/coverage-py.lock +++ b/tools/coverage-py.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "coverage[toml]<7.0,>=6.4" @@ -54,14 +54,14 @@ "only_builds": [], "only_wheels": [], "path_mappings": {}, - "pex_version": "2.3.0", + "pex_version": "2.3.1", "pip_version": "24.0", "prefer_older_binary": false, "requirements": [ "coverage[toml]<7.0,>=6.4" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/tools/mypy.lock b/tools/mypy.lock index 4f1c0394a2b..2c7a4a7cddf 100644 --- a/tools/mypy.lock +++ b/tools/mypy.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "mypy==1.10.0" @@ -100,19 +100,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a", - "url": "https://files.pythonhosted.org/packages/01/f3/936e209267d6ef7510322191003885de524fc48d1b43269810cd589ceaf5/typing_extensions-4.11.0-py3-none-any.whl" + "hash": "04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", + "url": "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0", - "url": "https://files.pythonhosted.org/packages/f6/f3/b827b3ab53b4e3d8513914586dcca61c355fa2ce8252dea4da56e67bf8f2/typing_extensions-4.11.0.tar.gz" + "hash": "1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", + "url": "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz" } ], "project_name": "typing-extensions", "requires_dists": [], "requires_python": ">=3.8", - "version": "4.11.0" + "version": "4.12.2" } ], "platform_tag": null @@ -121,14 +121,14 @@ "only_builds": [], "only_wheels": [], "path_mappings": {}, - "pex_version": "2.3.0", + "pex_version": "2.3.1", "pip_version": "24.0", "prefer_older_binary": false, "requirements": [ "mypy==1.10.0" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/tools/pants-plugins/scie/README.md b/tools/pants-plugins/scie/README.md index f996ac7a139..2a3cb1fd888 100644 --- a/tools/pants-plugins/scie/README.md +++ b/tools/pants-plugins/scie/README.md @@ -105,8 +105,8 @@ description = "An example FastAPI Lift application including using an external u [[lift.interpreters]] id = "cpython" provider = "PythonBuildStandalone" -release = "20240224" -version = "3.12.2" +release = "20240713" +version = "3.12.4" [[lift.files]] # Note the leading colon, which is required to reference the pex_binary 
dependency diff --git a/tools/pants-plugins/scie/config.py b/tools/pants-plugins/scie/config.py index bbe451305c3..04bfc1923fa 100644 --- a/tools/pants-plugins/scie/config.py +++ b/tools/pants-plugins/scie/config.py @@ -58,7 +58,7 @@ class Interpreter: version: str id: str = "cpython" provider: str = "PythonBuildStandalone" - release: str = "20240224" + release: str = "20240713" lazy: bool = False diff --git a/tools/pants-plugins/scie/subsystems.py b/tools/pants-plugins/scie/subsystems.py index 6e1b4a0f94b..fb0ebdcf918 100644 --- a/tools/pants-plugins/scie/subsystems.py +++ b/tools/pants-plugins/scie/subsystems.py @@ -15,12 +15,12 @@ class Science(TemplatedExternalTool): options_scope = "science" help = softwrap("""A high level tool to build scies with.""") - default_version = "0.3.0" + default_version = "0.4.1" default_known_versions = [ - "0.3.0|linux_arm64|8a134f2f307137319300d695aa177551a4a4d508cd6324a0aad09d7365edfdef|6364946", - "0.3.0|linux_x86_64|60730e7d03888254d7b41f5aace431f4264ee20d80924b989d4106b3e2f238dc|7258131", - "0.3.0|macos_arm64|badfafe685138bf8606d96e7501723f24b9127b9d4e415e2125cf4f06f7f7f64|4185377", - "0.3.0|macos_x86_64|bcae03dbd58b8412f1b3e62f1d882b424a15e60848c392dd4c19601cd234477c|4317644", + "0.4.1|linux_arm64|d2983bc3b293ae59f9aad83968a1ac41d1c761b53504819b243fd8a40e5db30f|8547671", + "0.4.1|linux_x86_64|f5eda054ae3a2ce14e029d723acac7a2e76f21051fdbdad86adfd0916f512887|9706622", + "0.4.1|macos_arm64|cd2edd426d706181bace3b1663aef429e753072a73b850b69d971136ab23ff92|4286359", + "0.4.1|macos_x86_64|0b5969f379baa9e32f832996d134ac11b84618103c58a1231d81d5a98c5570e9|4483114", ] default_url_template = ( diff --git a/tools/pytest.lock b/tools/pytest.lock index 2ceb7ec166f..007cfe34cb5 100644 --- a/tools/pytest.lock +++ b/tools/pytest.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "aioresponses>=0.7.6", @@ -38,73 +38,73 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "ff30218887e62209942f91ac1be902cc80cddb86bf00fbc6783b7a43b2bea26f", - "url": "https://files.pythonhosted.org/packages/78/4c/579dcd801e1d98a8cb9144005452c65bcdaf5cce0aff1d6363385a8062b3/aiohttp-3.9.3-cp312-cp312-musllinux_1_1_x86_64.whl" + "hash": "c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da", + "url": "https://files.pythonhosted.org/packages/5c/f1/f61b397a0eaf01d197e610b0f56935b0002d688f27d73af2882b282fc2f8/aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl" }, { "algorithm": "sha256", - "hash": "38a19bc3b686ad55804ae931012f78f7a534cce165d089a2059f658f6c91fa60", - "url": "https://files.pythonhosted.org/packages/02/fe/b15ae84c4641ff829154d7a6646c4ba4612208ab28229c90bf0844e59e18/aiohttp-3.9.3-cp312-cp312-macosx_10_9_universal2.whl" + "hash": "edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551", + "url": "https://files.pythonhosted.org/packages/04/a4/e3679773ea7eb5b37a2c998e25b017cc5349edf6ba2739d1f32855cfb11b/aiohttp-3.9.5.tar.gz" }, { "algorithm": "sha256", - "hash": "e5e46b578c0e9db71d04c4b506a2121c0cb371dd89af17a0586ff6769d4c58c1", - "url": "https://files.pythonhosted.org/packages/03/20/0a43a00edd6a401369ceb38bfe07a67823337dd26102e760d3230e0dedcf/aiohttp-3.9.3-cp312-cp312-musllinux_1_1_i686.whl" + "hash": "18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf", + "url": 
"https://files.pythonhosted.org/packages/0c/ea/8e1bd13e39b3f4c37889b8480f04ed398e07017f5709d66d4e1d0dee39fe/aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" }, { "algorithm": "sha256", - "hash": "ba39e9c8627edc56544c8628cc180d88605df3892beeb2b94c9bc857774848ca", - "url": "https://files.pythonhosted.org/packages/0e/91/fdd26fc726d7ece6bf735a8613893e14dea5de8cc90757de4a412fe89355/aiohttp-3.9.3-cp312-cp312-musllinux_1_1_aarch64.whl" + "hash": "0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c", + "url": "https://files.pythonhosted.org/packages/18/5f/f6428eb55244d44e1c674c8c823ae1567136ac1d2f8b128e194dd4febbe1/aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl" }, { "algorithm": "sha256", - "hash": "90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7", - "url": "https://files.pythonhosted.org/packages/18/93/1f005bbe044471a0444a82cdd7356f5120b9cf94fe2c50c0cdbf28f1258b/aiohttp-3.9.3.tar.gz" + "hash": "320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f", + "url": "https://files.pythonhosted.org/packages/2a/ac/7c00027510f42a21c0a905f2472d9afef7ea276573357829bfe8c12883d4/aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl" }, { "algorithm": "sha256", - "hash": "770d015888c2a598b377bd2f663adfd947d78c0124cfe7b959e1ef39f5b13869", - "url": "https://files.pythonhosted.org/packages/5f/75/b3f077038cb3a8d83cd4d128e23d432bd40b6efd79e6f4361551f3c92e5e/aiohttp-3.9.3-cp312-cp312-macosx_10_9_x86_64.whl" + "hash": "393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a", + "url": "https://files.pythonhosted.org/packages/54/8e/72d1ddd6e653b6d4b7b1fece7619287d3319bae10ad3a7f12d956bcc9e96/aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl" }, { "algorithm": "sha256", - "hash": "c3452ea726c76e92f3b9fae4b34a151981a9ec0a4847a627c43d71a15ac32aa6", - "url": "https://files.pythonhosted.org/packages/64/df/5cddb631867dbc85c058efcb16cbccb72f8bf66c0f6dca38dee346f4699a/aiohttp-3.9.3-cp312-cp312-musllinux_1_1_s390x.whl" + "hash": "c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678", + "url": "https://files.pythonhosted.org/packages/5e/25/c6bd6cb160a4dc81f83adbc9bdd6758f01932a6c81a3e4ac707746e7855e/aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl" }, { "algorithm": "sha256", - "hash": "dc9b311743a78043b26ffaeeb9715dc360335e5517832f5a8e339f8a43581e4d", - "url": "https://files.pythonhosted.org/packages/6f/82/58ceac3a641202957466a532e9f92f439c6a71b74a4ffcc1919e270703d2/aiohttp-3.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl" + "hash": "d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f", + "url": "https://files.pythonhosted.org/packages/78/28/2080ed3140b7d25c406f77fe2d5776edd9c7a25228f7f905d7058a6e2d61/aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl" }, { "algorithm": "sha256", - "hash": "938a9653e1e0c592053f815f7028e41a3062e902095e5a7dc84617c87267ebd5", - "url": "https://files.pythonhosted.org/packages/72/09/1f36849c36b7929dd09e013c637808fcaf908a0aa543388c2903dbb68bba/aiohttp-3.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl" + "hash": "8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a", + "url": "https://files.pythonhosted.org/packages/88/31/e55083b026428324cde827c04bdfbc837c131f9d3ee38d28c766614b09ef/aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl" }, { "algorithm": "sha256", - "hash": "ee43080e75fc92bf36219926c8e6de497f9b247301bbf88c5c7593d931426679", - "url": 
"https://files.pythonhosted.org/packages/98/e4/6e56f3d2a9404192ed46ad8edf7c676aafeb8f342ca134d69fed920a59f3/aiohttp-3.9.3-cp312-cp312-macosx_11_0_arm64.whl" + "hash": "8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa", + "url": "https://files.pythonhosted.org/packages/a6/39/ca4fc97af53167ff6c8888a59002b17447bddd8dd474ae0f0e778446cfe7/aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl" }, { "algorithm": "sha256", - "hash": "a6fe5571784af92b6bc2fda8d1925cccdf24642d49546d3144948a6a1ed58ca5", - "url": "https://files.pythonhosted.org/packages/e2/11/4bd14dee3b507dbe20413e972c10accb79de8390ddac5154ef076c1ca31a/aiohttp-3.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" + "hash": "82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4", + "url": "https://files.pythonhosted.org/packages/d3/c0/cd9d02e1b9e1b1073c94f7692ffe69067987c4acc0252bbc0c7645360d37/aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "52df73f14ed99cee84865b95a3d9e044f226320a87af208f068ecc33e0c35b96", - "url": "https://files.pythonhosted.org/packages/e9/18/64c65a8ead659bae24a47a8197195be4340f26260e4363bd4924346b9343/aiohttp-3.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58", + "url": "https://files.pythonhosted.org/packages/dd/0a/526c8480bd846b9155c624c7e54db94733fc6b381dfd748cc8dd69c994b0/aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "b955ed993491f1a5da7f92e98d5dad3c1e14dc175f74517c4e610b1f2456fb11", - "url": "https://files.pythonhosted.org/packages/ef/d1/6aea10c955896329402950407823625ab3a549b99e9c1e97fc61e5622b8a/aiohttp-3.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl" + "hash": "2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81", + "url": "https://files.pythonhosted.org/packages/e3/f5/e0c216a12b2490cbecd79e9b7671f4e50dfc72e9a52347943aabe6f5bc44/aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl" }, { "algorithm": "sha256", - "hash": "504b6981675ace64c28bf4a05a508af5cde526e36492c98916127f5a02354d53", - "url": "https://files.pythonhosted.org/packages/fd/4f/5c6041fca616a1cafa4914f630d6898085afe4683be5387a4054da55f52a/aiohttp-3.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c", + "url": "https://files.pythonhosted.org/packages/f2/fb/d65d58230e9ed5cfed886b0c433634bfb14cbe183125e84de909559e29e7/aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl" } ], "project_name": "aiohttp", @@ -120,7 +120,7 @@ "yarl<2.0,>=1.0" ], "requires_python": ">=3.8", - "version": "3.9.3" + "version": "3.9.5" }, { "artifacts": [ @@ -207,53 +207,53 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677", - "url": "https://files.pythonhosted.org/packages/99/15/dbcb5d0a22bf5357cf456dfd16f9ceb89c54544d6201d53bc77c75077a8e/coverage-7.4.4-pp38.pp39.pp310-none-any.whl" + "hash": "6fe885135c8a479d3e37a7aae61cbd3a0fb2deccb4dda3c25f92a49189f766d6", + "url": "https://files.pythonhosted.org/packages/ea/69/2b79b6b37c57cd05c85b76ec5ceabf7e091ab0f4986dfefaddbb468881c0/coverage-7.6.0-pp38.pp39.pp310-none-any.whl" }, { "algorithm": "sha256", - "hash": "3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70", - "url": 
"https://files.pythonhosted.org/packages/30/1a/105f0139df6a2adbcaa0c110711a46dbd9f59e93a09ca15a97d59c2564f2/coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" + "hash": "da8549d17489cd52f85a9829d0e1d91059359b3c54a26f28bec2c5d369524807", + "url": "https://files.pythonhosted.org/packages/0c/a2/d7c0988df525298b2c19c482cec27f76bbeba6c3ed7f85d9f79d8996e509/coverage-7.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl" }, { "algorithm": "sha256", - "hash": "d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978", - "url": "https://files.pythonhosted.org/packages/41/6d/e142c823e5d4b24481f990da4cf9d2d577a6f4e1fb6faf39d9a4e42b1d43/coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" + "hash": "9cc44bf0315268e253bf563f3560e6c004efe38f76db03a1558274a6e04bf5d5", + "url": "https://files.pythonhosted.org/packages/0e/2a/f62d42a48449b26cfdf940661cf28bccc27e199dc0e956c738a6b1c942af/coverage-7.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl" }, { "algorithm": "sha256", - "hash": "41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818", - "url": "https://files.pythonhosted.org/packages/88/92/07f9c593cd27e3c595b8cb83b95adad8c9ba3d611debceed097a5fd6be4b/coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl" + "hash": "1fad32ee9b27350687035cb5fdf9145bc9cf0a094a9577d43e909948ebcfa27b", + "url": "https://files.pythonhosted.org/packages/2a/ce/375f8fbbabc51e3dfce91355460912c930a4e241ffbafc1f3a35f43ac90b/coverage-7.6.0-cp312-cp312-musllinux_1_2_aarch64.whl" }, { "algorithm": "sha256", - "hash": "00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c", - "url": "https://files.pythonhosted.org/packages/92/12/2303d1c543a11ea060dbc7144ed3174fc09107b5dd333649415c95ede58b/coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl" + "hash": "f7db0b6ae1f96ae41afe626095149ecd1b212b424626175a6633c2999eaad45b", + "url": "https://files.pythonhosted.org/packages/57/3a/287ea47cca84c92528b5f9b34b971f845b637d37c3eead9f10aede3531da/coverage-7.6.0-cp312-cp312-macosx_10_9_x86_64.whl" }, { "algorithm": "sha256", - "hash": "fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48", - "url": "https://files.pythonhosted.org/packages/96/5a/7d0e945c4759fe9d19aad1679dd3096aeb4cb9fcf0062fe24554dc4787b8/coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl" + "hash": "289cc803fa1dc901f84701ac10c9ee873619320f2f9aff38794db4a4a0268d51", + "url": "https://files.pythonhosted.org/packages/64/c8/a94ce9e17756aed521085ae716d627623374d34f92c1daf7162272ecb030/coverage-7.6.0.tar.gz" }, { "algorithm": "sha256", - "hash": "ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51", - "url": "https://files.pythonhosted.org/packages/98/79/185cb42910b6a2b2851980407c8445ac0da0750dff65e420e86f973c8396/coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl" + "hash": "76d5f82213aa78098b9b964ea89de4617e70e0d43e97900c2778a50856dac605", + "url": "https://files.pythonhosted.org/packages/64/ea/848f064727fe172e80f8a7abc77664c593b6bece14d5acab7d7087f1244e/coverage-7.6.0-cp312-cp312-musllinux_1_2_x86_64.whl" }, { "algorithm": "sha256", - "hash": "201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76", - "url": "https://files.pythonhosted.org/packages/a0/de/a54b245e781bfd6f3fd7ce5566a695686b5c25ee7c743f514e7634428972/coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl" + "hash": 
"bbdf9a72403110a3bdae77948b8011f644571311c2fb35ee15f0f10a8fc082e8", + "url": "https://files.pythonhosted.org/packages/7b/14/3432bbdabeaa79de25421d24161ab472578ffe73fc56b0aa9411bea66335/coverage-7.6.0-cp312-cp312-macosx_11_0_arm64.whl" }, { "algorithm": "sha256", - "hash": "c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49", - "url": "https://files.pythonhosted.org/packages/bf/d5/f809d8b630cf4c11fe490e20037a343d12a74ec2783c6cdb5aee725e7137/coverage-7.4.4.tar.gz" + "hash": "0086cd4fc71b7d485ac93ca4239c8f75732c2ae3ba83f6be1c9be59d9e2c6382", + "url": "https://files.pythonhosted.org/packages/f2/aa/0419103c357bfd95a65d7b2e2249f9f1d79194241c5e87819cd81d36b96c/coverage-7.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }, { "algorithm": "sha256", - "hash": "69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9", - "url": "https://files.pythonhosted.org/packages/f4/1b/79cdb7b11bbbd6540a536ac79412904b5c1f8903d5c1330084212afa8ceb/coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl" + "hash": "044a0985a4f25b335882b0966625270a8d9db3d3409ddc49a4eb00b0ef5e8cee", + "url": "https://files.pythonhosted.org/packages/fd/ca/5eb0004e0bf66db1d4a18c67e4aece76ff409b061d85f31843c28c9a531c/coverage-7.6.0-cp312-cp312-musllinux_1_2_i686.whl" } ], "project_name": "coverage", @@ -261,7 +261,7 @@ "tomli; python_full_version <= \"3.11.0a6\" and extra == \"toml\"" ], "requires_python": ">=3.8", - "version": "7.4.4" + "version": "7.6.0" }, { "artifacts": [ @@ -350,19 +350,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f", - "url": "https://files.pythonhosted.org/packages/c2/e7/a82b05cf63a603df6e68d59ae6a68bf5064484a0718ea5033660af4b54a9/idna-3.6-py3-none-any.whl" + "hash": "82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0", + "url": "https://files.pythonhosted.org/packages/e5/3e/741d8c82801c347547f8a2a06aa57dbb1992be9e948df2ea0eda2c8b79e8/idna-3.7-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca", - "url": "https://files.pythonhosted.org/packages/bf/3f/ea4b9117521a1e9c50344b909be7886dd00a519552724809bb1f486986c2/idna-3.6.tar.gz" + "hash": "028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", + "url": "https://files.pythonhosted.org/packages/21/ed/f86a79a07470cb07819390452f178b3bef1d375f2ec021ecfc709fc7cf07/idna-3.7.tar.gz" } ], "project_name": "idna", "requires_dists": [], "requires_python": ">=3.5", - "version": "3.6" + "version": "3.7" }, { "artifacts": [ @@ -469,31 +469,31 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5", - "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl" + "hash": "5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", + "url": "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9", - "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz" + "hash": "026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "url": 
"https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz" } ], "project_name": "packaging", "requires_dists": [], - "requires_python": ">=3.7", - "version": "24.0" + "requires_python": ">=3.8", + "version": "24.1" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", - "url": "https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl" + "hash": "44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", + "url": "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be", - "url": "https://files.pythonhosted.org/packages/54/c6/43f9d44d92aed815e781ca25ba8c174257e27253a94630d21be8725a2b59/pluggy-1.4.0.tar.gz" + "hash": "2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "url": "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz" } ], "project_name": "pluggy", @@ -504,7 +504,7 @@ "tox; extra == \"dev\"" ], "requires_python": ">=3.8", - "version": "1.4.0" + "version": "1.5.0" }, { "artifacts": [ @@ -569,13 +569,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a", - "url": "https://files.pythonhosted.org/packages/e0/c9/de22c040d4c821c6c797ca1d720f1f4b2f4293d5757e811c62ae544496c4/pytest_asyncio-0.23.6-py3-none-any.whl" + "hash": "009b48127fbe44518a547bddd25611551b0e43ccdbf1e67d12479f569832c20b", + "url": "https://files.pythonhosted.org/packages/e5/98/947690b1a79af83e584143cb904497caff05bb6016614b38326a81076357/pytest_asyncio-0.23.7-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f", - "url": "https://files.pythonhosted.org/packages/cd/ef/80107b9e939875ad613c705d99d91e4510dcf5fed29613ac9aecbcba0a8d/pytest-asyncio-0.23.6.tar.gz" + "hash": "5f5c72948f4c49e7db4f29f2521d4031f1c27f86e57b046126654083d4770268", + "url": "https://files.pythonhosted.org/packages/13/d9/1dcac9b3fc6eccf8f1e3a657439c11ffc5cf762edd20f65577f832ba248b/pytest_asyncio-0.23.7.tar.gz" } ], "project_name": "pytest-asyncio", @@ -587,7 +587,7 @@ "sphinx>=5.3; extra == \"docs\"" ], "requires_python": ">=3.8", - "version": "0.23.6" + "version": "0.23.7" }, { "artifacts": [ @@ -678,68 +678,60 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c", - "url": "https://files.pythonhosted.org/packages/92/e1/1c8bb3420105e70bdf357d57dd5567202b4ef8d27f810e98bb962d950834/setuptools-69.2.0-py3-none-any.whl" + "hash": "fe384da74336c398e0d956d1cae0669bc02eed936cdb1d49b57de1990dc11ffc", + "url": "https://files.pythonhosted.org/packages/ef/15/88e46eb9387e905704b69849618e699dc2f54407d8953cc4ec4b8b46528d/setuptools-70.3.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e", - "url": "https://files.pythonhosted.org/packages/4d/5b/dc575711b6b8f2f866131a40d053e30e962e633b332acf7cd2c24843d83d/setuptools-69.2.0.tar.gz" + "hash": "f171bab1dfbc86b132997f26a119f6056a57950d058587841a0082e8830f9dc5", + "url": 
"https://files.pythonhosted.org/packages/65/d8/10a70e86f6c28ae59f101a9de6d77bf70f147180fbf40c3af0f64080adc3/setuptools-70.3.0.tar.gz" } ], "project_name": "setuptools", "requires_dists": [ - "build[virtualenv]; extra == \"testing\"", - "build[virtualenv]>=1.0.3; extra == \"testing-integration\"", - "filelock>=3.4.0; extra == \"testing\"", - "filelock>=3.4.0; extra == \"testing-integration\"", - "furo; extra == \"docs\"", - "importlib-metadata; extra == \"testing\"", - "ini2toml[lite]>=0.9; extra == \"testing\"", - "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing-integration\"", - "jaraco.packaging>=9.3; extra == \"docs\"", - "jaraco.path>=3.2.0; extra == \"testing\"", - "jaraco.path>=3.2.0; extra == \"testing-integration\"", - "jaraco.tidelift>=1.4; extra == \"docs\"", - "mypy==1.9; extra == \"testing\"", - "packaging>=23.2; extra == \"testing\"", - "packaging>=23.2; extra == \"testing-integration\"", - "pip>=19.1; extra == \"testing\"", - "pygments-github-lexers==0.0.5; extra == \"docs\"", - "pytest-checkdocs>=2.4; extra == \"testing\"", - "pytest-cov; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-enabler; extra == \"testing-integration\"", - "pytest-enabler>=2.2; extra == \"testing\"", - "pytest-home>=0.5; extra == \"testing\"", - "pytest-mypy>=0.9.1; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-perf; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-ruff>=0.2.1; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-timeout; extra == \"testing\"", - "pytest-xdist; extra == \"testing-integration\"", - "pytest-xdist>=3; extra == \"testing\"", - "pytest; extra == \"testing-integration\"", - "pytest>=6; extra == \"testing\"", - "rst.linker>=1.9; extra == \"docs\"", - "sphinx-favicon; extra == \"docs\"", - "sphinx-inline-tabs; extra == \"docs\"", - "sphinx-lint; extra == \"docs\"", - "sphinx-notfound-page<2,>=1; extra == \"docs\"", - "sphinx-reredirects; extra == \"docs\"", - "sphinx<7.2.5; extra == \"docs\"", - "sphinx>=3.5; extra == \"docs\"", - "sphinxcontrib-towncrier; extra == \"docs\"", - "tomli-w>=1.0.0; extra == \"testing\"", - "tomli; extra == \"testing\"", - "tomli; extra == \"testing-integration\"", - "virtualenv>=13.0.0; extra == \"testing\"", - "virtualenv>=13.0.0; extra == \"testing-integration\"", - "wheel; extra == \"testing\"", - "wheel; extra == \"testing-integration\"" + "build[virtualenv]>=1.0.3; extra == \"test\"", + "filelock>=3.4.0; extra == \"test\"", + "furo; extra == \"doc\"", + "importlib-metadata; extra == \"test\"", + "ini2toml[lite]>=0.14; extra == \"test\"", + "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"test\"", + "jaraco.envs>=2.2; extra == \"test\"", + "jaraco.packaging>=9.3; extra == \"doc\"", + "jaraco.path>=3.2.0; extra == \"test\"", + "jaraco.test; extra == \"test\"", + "jaraco.tidelift>=1.4; extra == \"doc\"", + "mypy==1.10.0; extra == \"test\"", + "packaging>=23.2; extra == \"test\"", + "pip>=19.1; extra == \"test\"", + "pygments-github-lexers==0.0.5; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"test\"", + "pytest!=8.1.*,>=6; extra == \"test\"", + "pytest-checkdocs>=2.4; extra == \"test\"", + "pytest-cov; extra == \"test\"", + "pytest-enabler>=2.2; extra == \"test\"", + "pytest-home>=0.5; extra == \"test\"", + 
"pytest-mypy; extra == \"test\"", + "pytest-perf; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-ruff>=0.3.2; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-subprocess; extra == \"test\"", + "pytest-timeout; extra == \"test\"", + "pytest-xdist>=3; extra == \"test\"", + "rst.linker>=1.9; extra == \"doc\"", + "sphinx-favicon; extra == \"doc\"", + "sphinx-inline-tabs; extra == \"doc\"", + "sphinx-lint; extra == \"doc\"", + "sphinx-notfound-page<2,>=1; extra == \"doc\"", + "sphinx-reredirects; extra == \"doc\"", + "sphinx>=3.5; extra == \"doc\"", + "sphinxcontrib-towncrier; extra == \"doc\"", + "tomli-w>=1.0.0; extra == \"test\"", + "tomli; extra == \"test\"", + "virtualenv>=13.0.0; extra == \"test\"", + "wheel; extra == \"test\"" ], "requires_python": ">=3.8", - "version": "69.2.0" + "version": "70.3.0" }, { "artifacts": [ @@ -835,7 +827,7 @@ "only_builds": [], "only_wheels": [], "path_mappings": {}, - "pex_version": "2.3.0", + "pex_version": "2.3.1", "pip_version": "24.0", "prefer_older_binary": false, "requirements": [ @@ -849,7 +841,7 @@ "pytest~=7.4.3" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/tools/ruff.lock b/tools/ruff.lock index fc4be277690..26851ef2531 100644 --- a/tools/ruff.lock +++ b/tools/ruff.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "ruff-lsp~=0.0.49", @@ -123,19 +123,19 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5", - "url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl" + "hash": "5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", + "url": "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9", - "url": "https://files.pythonhosted.org/packages/ee/b5/b43a27ac7472e1818c4bafd44430e69605baefe1f34440593e0332ec8b4d/packaging-24.0.tar.gz" + "hash": "026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "url": "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz" } ], "project_name": "packaging", "requires_dists": [], - "requires_python": ">=3.7", - "version": "24.0" + "requires_python": ">=3.8", + "version": "24.1" }, { "artifacts": [ @@ -241,13 +241,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "5ea39968510d046b3c62dc5a7e3b52e867c2de14af34a406883fe05d55bab2b0", - "url": "https://files.pythonhosted.org/packages/d5/7f/56faa24dd96deea5831bfe48558bd2c501f9c44a9b00a14839eff19b84a3/ruff_lsp-0.0.53-py3-none-any.whl" + "hash": "1cc7d2f1cb69cbea1dfeba0f2d7dd5832bc68b0b052c7166530bcce63aa75f57", + "url": "https://files.pythonhosted.org/packages/b0/84/fdf5ea32fb3bedb784f5c45d1888c905a71031b65b016a33c828bfc6a131/ruff_lsp-0.0.54-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "de38eccd06020350630ac3518fe04a9640c8f66908758d8a623b5ea021bf84b0", - "url": "https://files.pythonhosted.org/packages/4e/18/04110904e240a2bb1a95f3f63b49374961d752b30e3f6726b4d6fa6aa9fc/ruff_lsp-0.0.53.tar.gz" + "hash": "33e1d4dd20ca481fc6a811afcfdd451798c22fc39f2104df23c2855e322a0582", + "url": 
"https://files.pythonhosted.org/packages/aa/ff/68b1dd0bf5bce5d4bfbad6fcfddc7fc46ea070e0d191aa9a2972acb51f8a/ruff_lsp-0.0.54.tar.gz" } ], "project_name": "ruff-lsp", @@ -257,32 +257,32 @@ "packaging>=23.1", "pip-tools<7.0.0,>=6.13.0; extra == \"dev\"", "pygls>=1.1.0", - "pytest-asyncio==0.21.1; extra == \"dev\"", + "pytest-asyncio==0.21.2; extra == \"dev\"", "pytest<8.0.0,>=7.3.1; extra == \"dev\"", "python-lsp-jsonrpc==1.0.0; extra == \"dev\"", "ruff>=0.0.274", "typing-extensions" ], "requires_python": ">=3.7", - "version": "0.0.53" + "version": "0.0.54" }, { "artifacts": [ { "algorithm": "sha256", - "hash": "69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475", - "url": "https://files.pythonhosted.org/packages/f9/de/dc04a3ea60b22624b51c703a84bbe0184abcd1d0b9bc8074b5d6b7ab90bb/typing_extensions-4.10.0-py3-none-any.whl" + "hash": "04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", + "url": "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb", - "url": "https://files.pythonhosted.org/packages/16/3a/0d26ce356c7465a19c9ea8814b960f8a36c3b0d07c323176620b7b483e44/typing_extensions-4.10.0.tar.gz" + "hash": "1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", + "url": "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz" } ], "project_name": "typing-extensions", "requires_dists": [], "requires_python": ">=3.8", - "version": "4.10.0" + "version": "4.12.2" } ], "platform_tag": null @@ -291,7 +291,7 @@ "only_builds": [], "only_wheels": [], "path_mappings": {}, - "pex_version": "2.3.0", + "pex_version": "2.3.1", "pip_version": "24.0", "prefer_older_binary": false, "requirements": [ @@ -299,7 +299,7 @@ "ruff~=0.1.11" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/tools/setuptools-requirements.txt b/tools/setuptools-requirements.txt index e19a510d175..f0797315a05 100644 --- a/tools/setuptools-requirements.txt +++ b/tools/setuptools-requirements.txt @@ -1,2 +1,2 @@ -setuptools~=69.2.0 -wheel~=0.43.0 \ No newline at end of file +setuptools~=70.3.0 +wheel~=0.43.0 diff --git a/tools/setuptools.lock b/tools/setuptools.lock index a0a4e5da35a..a19f74830f5 100644 --- a/tools/setuptools.lock +++ b/tools/setuptools.lock @@ -6,10 +6,10 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ -// "setuptools~=69.2.0", +// "setuptools~=70.3.0", // "wheel~=0.43.0" // ], // "manylinux": "manylinux2014", @@ -25,6 +25,7 @@ "allow_wheels": true, "build_isolation": true, "constraints": [], + "excluded": [], "locked_resolves": [ { "locked_requirements": [ @@ -32,68 +33,60 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c", - "url": "https://files.pythonhosted.org/packages/92/e1/1c8bb3420105e70bdf357d57dd5567202b4ef8d27f810e98bb962d950834/setuptools-69.2.0-py3-none-any.whl" + "hash": "fe384da74336c398e0d956d1cae0669bc02eed936cdb1d49b57de1990dc11ffc", + "url": "https://files.pythonhosted.org/packages/ef/15/88e46eb9387e905704b69849618e699dc2f54407d8953cc4ec4b8b46528d/setuptools-70.3.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": 
"0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e", - "url": "https://files.pythonhosted.org/packages/4d/5b/dc575711b6b8f2f866131a40d053e30e962e633b332acf7cd2c24843d83d/setuptools-69.2.0.tar.gz" + "hash": "f171bab1dfbc86b132997f26a119f6056a57950d058587841a0082e8830f9dc5", + "url": "https://files.pythonhosted.org/packages/65/d8/10a70e86f6c28ae59f101a9de6d77bf70f147180fbf40c3af0f64080adc3/setuptools-70.3.0.tar.gz" } ], "project_name": "setuptools", "requires_dists": [ - "build[virtualenv]; extra == \"testing\"", - "build[virtualenv]>=1.0.3; extra == \"testing-integration\"", - "filelock>=3.4.0; extra == \"testing\"", - "filelock>=3.4.0; extra == \"testing-integration\"", - "furo; extra == \"docs\"", - "importlib-metadata; extra == \"testing\"", - "ini2toml[lite]>=0.9; extra == \"testing\"", - "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing-integration\"", - "jaraco.packaging>=9.3; extra == \"docs\"", - "jaraco.path>=3.2.0; extra == \"testing\"", - "jaraco.path>=3.2.0; extra == \"testing-integration\"", - "jaraco.tidelift>=1.4; extra == \"docs\"", - "mypy==1.9; extra == \"testing\"", - "packaging>=23.2; extra == \"testing\"", - "packaging>=23.2; extra == \"testing-integration\"", - "pip>=19.1; extra == \"testing\"", - "pygments-github-lexers==0.0.5; extra == \"docs\"", - "pytest-checkdocs>=2.4; extra == \"testing\"", - "pytest-cov; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-enabler; extra == \"testing-integration\"", - "pytest-enabler>=2.2; extra == \"testing\"", - "pytest-home>=0.5; extra == \"testing\"", - "pytest-mypy>=0.9.1; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-perf; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-ruff>=0.2.1; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-timeout; extra == \"testing\"", - "pytest-xdist; extra == \"testing-integration\"", - "pytest-xdist>=3; extra == \"testing\"", - "pytest; extra == \"testing-integration\"", - "pytest>=6; extra == \"testing\"", - "rst.linker>=1.9; extra == \"docs\"", - "sphinx-favicon; extra == \"docs\"", - "sphinx-inline-tabs; extra == \"docs\"", - "sphinx-lint; extra == \"docs\"", - "sphinx-notfound-page<2,>=1; extra == \"docs\"", - "sphinx-reredirects; extra == \"docs\"", - "sphinx<7.2.5; extra == \"docs\"", - "sphinx>=3.5; extra == \"docs\"", - "sphinxcontrib-towncrier; extra == \"docs\"", - "tomli-w>=1.0.0; extra == \"testing\"", - "tomli; extra == \"testing\"", - "tomli; extra == \"testing-integration\"", - "virtualenv>=13.0.0; extra == \"testing\"", - "virtualenv>=13.0.0; extra == \"testing-integration\"", - "wheel; extra == \"testing\"", - "wheel; extra == \"testing-integration\"" + "build[virtualenv]>=1.0.3; extra == \"test\"", + "filelock>=3.4.0; extra == \"test\"", + "furo; extra == \"doc\"", + "importlib-metadata; extra == \"test\"", + "ini2toml[lite]>=0.14; extra == \"test\"", + "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"test\"", + "jaraco.envs>=2.2; extra == \"test\"", + "jaraco.packaging>=9.3; extra == \"doc\"", + "jaraco.path>=3.2.0; extra == \"test\"", + "jaraco.test; extra == \"test\"", + "jaraco.tidelift>=1.4; extra == \"doc\"", + "mypy==1.10.0; extra == \"test\"", + "packaging>=23.2; extra == \"test\"", + "pip>=19.1; extra == \"test\"", + "pygments-github-lexers==0.0.5; extra == \"doc\"", + 
"pyproject-hooks!=1.1; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"test\"", + "pytest!=8.1.*,>=6; extra == \"test\"", + "pytest-checkdocs>=2.4; extra == \"test\"", + "pytest-cov; extra == \"test\"", + "pytest-enabler>=2.2; extra == \"test\"", + "pytest-home>=0.5; extra == \"test\"", + "pytest-mypy; extra == \"test\"", + "pytest-perf; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-ruff>=0.3.2; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-subprocess; extra == \"test\"", + "pytest-timeout; extra == \"test\"", + "pytest-xdist>=3; extra == \"test\"", + "rst.linker>=1.9; extra == \"doc\"", + "sphinx-favicon; extra == \"doc\"", + "sphinx-inline-tabs; extra == \"doc\"", + "sphinx-lint; extra == \"doc\"", + "sphinx-notfound-page<2,>=1; extra == \"doc\"", + "sphinx-reredirects; extra == \"doc\"", + "sphinx>=3.5; extra == \"doc\"", + "sphinxcontrib-towncrier; extra == \"doc\"", + "tomli-w>=1.0.0; extra == \"test\"", + "tomli; extra == \"test\"", + "virtualenv>=13.0.0; extra == \"test\"", + "wheel; extra == \"test\"" ], "requires_python": ">=3.8", - "version": "69.2.0" + "version": "70.3.0" }, { "artifacts": [ @@ -122,16 +115,17 @@ ], "only_builds": [], "only_wheels": [], + "overridden": [], "path_mappings": {}, - "pex_version": "2.3.0", - "pip_version": "24.0", + "pex_version": "2.10.0", + "pip_version": "24.1.2", "prefer_older_binary": false, "requirements": [ - "setuptools~=69.2.0", + "setuptools~=70.3.0", "wheel~=0.43.0" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal", diff --git a/tools/towncrier.lock b/tools/towncrier.lock index 8c1432f6c1e..c67753b4bcf 100644 --- a/tools/towncrier.lock +++ b/tools/towncrier.lock @@ -6,7 +6,7 @@ // { // "version": 3, // "valid_for_interpreter_constraints": [ -// "CPython==3.12.2" +// "CPython==3.12.4" // ], // "generated_with_requirements": [ // "towncrier~=22.12" @@ -97,13 +97,13 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", - "url": "https://files.pythonhosted.org/packages/30/6d/6de6be2d02603ab56e72997708809e8a5b0fbfee080735109b40a3564843/Jinja2-3.1.3-py3-none-any.whl" + "hash": "bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", + "url": "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90", - "url": "https://files.pythonhosted.org/packages/b2/5e/3a21abf3cd467d7876045335e681d276ac32492febe6d98ad89562d1a7e1/Jinja2-3.1.3.tar.gz" + "hash": "4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", + "url": "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz" } ], "project_name": "jinja2", @@ -112,7 +112,7 @@ "MarkupSafe>=2.0" ], "requires_python": ">=3.7", - "version": "3.1.3" + "version": "3.1.4" }, { "artifacts": [ @@ -171,68 +171,60 @@ "artifacts": [ { "algorithm": "sha256", - "hash": "c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c", - "url": "https://files.pythonhosted.org/packages/92/e1/1c8bb3420105e70bdf357d57dd5567202b4ef8d27f810e98bb962d950834/setuptools-69.2.0-py3-none-any.whl" + "hash": "fe384da74336c398e0d956d1cae0669bc02eed936cdb1d49b57de1990dc11ffc", + "url": 
"https://files.pythonhosted.org/packages/ef/15/88e46eb9387e905704b69849618e699dc2f54407d8953cc4ec4b8b46528d/setuptools-70.3.0-py3-none-any.whl" }, { "algorithm": "sha256", - "hash": "0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e", - "url": "https://files.pythonhosted.org/packages/4d/5b/dc575711b6b8f2f866131a40d053e30e962e633b332acf7cd2c24843d83d/setuptools-69.2.0.tar.gz" + "hash": "f171bab1dfbc86b132997f26a119f6056a57950d058587841a0082e8830f9dc5", + "url": "https://files.pythonhosted.org/packages/65/d8/10a70e86f6c28ae59f101a9de6d77bf70f147180fbf40c3af0f64080adc3/setuptools-70.3.0.tar.gz" } ], "project_name": "setuptools", "requires_dists": [ - "build[virtualenv]; extra == \"testing\"", - "build[virtualenv]>=1.0.3; extra == \"testing-integration\"", - "filelock>=3.4.0; extra == \"testing\"", - "filelock>=3.4.0; extra == \"testing-integration\"", - "furo; extra == \"docs\"", - "importlib-metadata; extra == \"testing\"", - "ini2toml[lite]>=0.9; extra == \"testing\"", - "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing\"", - "jaraco.envs>=2.2; extra == \"testing-integration\"", - "jaraco.packaging>=9.3; extra == \"docs\"", - "jaraco.path>=3.2.0; extra == \"testing\"", - "jaraco.path>=3.2.0; extra == \"testing-integration\"", - "jaraco.tidelift>=1.4; extra == \"docs\"", - "mypy==1.9; extra == \"testing\"", - "packaging>=23.2; extra == \"testing\"", - "packaging>=23.2; extra == \"testing-integration\"", - "pip>=19.1; extra == \"testing\"", - "pygments-github-lexers==0.0.5; extra == \"docs\"", - "pytest-checkdocs>=2.4; extra == \"testing\"", - "pytest-cov; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-enabler; extra == \"testing-integration\"", - "pytest-enabler>=2.2; extra == \"testing\"", - "pytest-home>=0.5; extra == \"testing\"", - "pytest-mypy>=0.9.1; platform_python_implementation != \"PyPy\" and extra == \"testing\"", - "pytest-perf; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-ruff>=0.2.1; sys_platform != \"cygwin\" and extra == \"testing\"", - "pytest-timeout; extra == \"testing\"", - "pytest-xdist; extra == \"testing-integration\"", - "pytest-xdist>=3; extra == \"testing\"", - "pytest; extra == \"testing-integration\"", - "pytest>=6; extra == \"testing\"", - "rst.linker>=1.9; extra == \"docs\"", - "sphinx-favicon; extra == \"docs\"", - "sphinx-inline-tabs; extra == \"docs\"", - "sphinx-lint; extra == \"docs\"", - "sphinx-notfound-page<2,>=1; extra == \"docs\"", - "sphinx-reredirects; extra == \"docs\"", - "sphinx<7.2.5; extra == \"docs\"", - "sphinx>=3.5; extra == \"docs\"", - "sphinxcontrib-towncrier; extra == \"docs\"", - "tomli-w>=1.0.0; extra == \"testing\"", - "tomli; extra == \"testing\"", - "tomli; extra == \"testing-integration\"", - "virtualenv>=13.0.0; extra == \"testing\"", - "virtualenv>=13.0.0; extra == \"testing-integration\"", - "wheel; extra == \"testing\"", - "wheel; extra == \"testing-integration\"" + "build[virtualenv]>=1.0.3; extra == \"test\"", + "filelock>=3.4.0; extra == \"test\"", + "furo; extra == \"doc\"", + "importlib-metadata; extra == \"test\"", + "ini2toml[lite]>=0.14; extra == \"test\"", + "jaraco.develop>=7.21; (python_version >= \"3.9\" and sys_platform != \"cygwin\") and extra == \"test\"", + "jaraco.envs>=2.2; extra == \"test\"", + "jaraco.packaging>=9.3; extra == \"doc\"", + "jaraco.path>=3.2.0; extra == \"test\"", + "jaraco.test; extra == \"test\"", + "jaraco.tidelift>=1.4; extra == 
\"doc\"", + "mypy==1.10.0; extra == \"test\"", + "packaging>=23.2; extra == \"test\"", + "pip>=19.1; extra == \"test\"", + "pygments-github-lexers==0.0.5; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"doc\"", + "pyproject-hooks!=1.1; extra == \"test\"", + "pytest!=8.1.*,>=6; extra == \"test\"", + "pytest-checkdocs>=2.4; extra == \"test\"", + "pytest-cov; extra == \"test\"", + "pytest-enabler>=2.2; extra == \"test\"", + "pytest-home>=0.5; extra == \"test\"", + "pytest-mypy; extra == \"test\"", + "pytest-perf; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-ruff>=0.3.2; sys_platform != \"cygwin\" and extra == \"test\"", + "pytest-subprocess; extra == \"test\"", + "pytest-timeout; extra == \"test\"", + "pytest-xdist>=3; extra == \"test\"", + "rst.linker>=1.9; extra == \"doc\"", + "sphinx-favicon; extra == \"doc\"", + "sphinx-inline-tabs; extra == \"doc\"", + "sphinx-lint; extra == \"doc\"", + "sphinx-notfound-page<2,>=1; extra == \"doc\"", + "sphinx-reredirects; extra == \"doc\"", + "sphinx>=3.5; extra == \"doc\"", + "sphinxcontrib-towncrier; extra == \"doc\"", + "tomli-w>=1.0.0; extra == \"test\"", + "tomli; extra == \"test\"", + "virtualenv>=13.0.0; extra == \"test\"", + "wheel; extra == \"test\"" ], "requires_python": ">=3.8", - "version": "69.2.0" + "version": "70.3.0" }, { "artifacts": [ @@ -270,14 +262,14 @@ "only_builds": [], "only_wheels": [], "path_mappings": {}, - "pex_version": "2.3.0", + "pex_version": "2.3.1", "pip_version": "24.0", "prefer_older_binary": false, "requirements": [ "towncrier~=22.12" ], "requires_python": [ - "==3.12.2" + "==3.12.4" ], "resolver_version": "pip-2020-resolver", "style": "universal",