From 1610ed44ee216f115631d2671e21c72066f24497 Mon Sep 17 00:00:00 2001 From: Dan Salmon Date: Fri, 28 Jul 2023 21:54:51 -0500 Subject: [PATCH] =?UTF-8?q?Golang=20Rewrite=20=F0=9F=8C=9F=20(#134)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * golang rewrite * update package name * cleanup --- .coveragerc | 4 - .dev/docker-compose.yml | 38 ++ .github/ISSUE_TEMPLATE/bug.yml | 74 +++ .github/workflows/docker-publish.yml | 87 ++++ .github/workflows/golangci-lint.yml | 55 +++ .github/workflows/pr_test-coverage.yml | 54 +++ .github/workflows/pr_test-matrix.yml | 56 +++ .github/workflows/release.yml | 28 ++ .gitignore | 30 +- .travis.yml | 28 -- Dockerfile | 6 - LICENSE | 2 +- Makefile | 17 + README.md | 354 +++++++++++---- S3Scanner/S3Bucket.py | 162 ------- S3Scanner/S3Service.py | 485 -------------------- S3Scanner/__init__.py | 0 S3Scanner/__main__.py | 246 ---------- S3Scanner/exceptions.py | 18 - bucket/_test_/buckets.txt | 6 + bucket/bucket.go | 265 +++++++++++ bucket/bucket_test.go | 352 +++++++++++++++ cmd/mqingest/mqingest.go | 123 +++++ config.yml | 14 + conftest.py | 15 - db/database.go | 62 +++ db/database_test.go | 85 ++++ go.mod | 60 +++ go.sum | 553 +++++++++++++++++++++++ groups/groups.go | 17 + log/formatter_hook.go | 43 ++ log/nested_json_formatter.go | 38 ++ main.go | 473 +++++++++++++++++++ main_test.go | 179 ++++++++ mq/mq.go | 40 ++ packaging/.goreleaser.yaml | 47 ++ packaging/docker/.dockerignore | 6 + packaging/docker/Dockerfile | 17 + permission/permission.go | 153 +++++++ permission/permission_test.go | 159 +++++++ provider/aws.go | 150 +++++++ provider/aws_test.go | 139 ++++++ provider/custom.go | 113 +++++ provider/custom_test.go | 30 ++ provider/digitalocean.go | 100 +++++ provider/digitalocean_test.go | 60 +++ provider/dreamhost.go | 100 +++++ provider/gcp.go | 65 +++ provider/linode.go | 100 +++++ provider/providers.go | 242 ++++++++++ provider/providers_test.go | 217 +++++++++ pyproject.toml | 6 
- requirements.txt | 1 - setup.cfg | 33 -- tests/TestUtils.py | 49 -- tests/test_bucket.py | 48 -- tests/test_scanner.py | 139 ------ tests/test_service.py | 600 ------------------------- 58 files changed, 4696 insertions(+), 1947 deletions(-) delete mode 100644 .coveragerc create mode 100644 .dev/docker-compose.yml create mode 100644 .github/ISSUE_TEMPLATE/bug.yml create mode 100644 .github/workflows/docker-publish.yml create mode 100644 .github/workflows/golangci-lint.yml create mode 100644 .github/workflows/pr_test-coverage.yml create mode 100644 .github/workflows/pr_test-matrix.yml create mode 100644 .github/workflows/release.yml delete mode 100644 .travis.yml delete mode 100644 Dockerfile create mode 100644 Makefile delete mode 100644 S3Scanner/S3Bucket.py delete mode 100644 S3Scanner/S3Service.py delete mode 100644 S3Scanner/__init__.py delete mode 100644 S3Scanner/__main__.py delete mode 100644 S3Scanner/exceptions.py create mode 100644 bucket/_test_/buckets.txt create mode 100644 bucket/bucket.go create mode 100644 bucket/bucket_test.go create mode 100644 cmd/mqingest/mqingest.go create mode 100644 config.yml delete mode 100644 conftest.py create mode 100644 db/database.go create mode 100644 db/database_test.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 groups/groups.go create mode 100644 log/formatter_hook.go create mode 100644 log/nested_json_formatter.go create mode 100644 main.go create mode 100644 main_test.go create mode 100644 mq/mq.go create mode 100644 packaging/.goreleaser.yaml create mode 100644 packaging/docker/.dockerignore create mode 100644 packaging/docker/Dockerfile create mode 100644 permission/permission.go create mode 100644 permission/permission_test.go create mode 100644 provider/aws.go create mode 100644 provider/aws_test.go create mode 100644 provider/custom.go create mode 100644 provider/custom_test.go create mode 100644 provider/digitalocean.go create mode 100644 provider/digitalocean_test.go create mode 
100644 provider/dreamhost.go create mode 100644 provider/gcp.go create mode 100644 provider/linode.go create mode 100644 provider/providers.go create mode 100644 provider/providers_test.go delete mode 100644 pyproject.toml delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 tests/TestUtils.py delete mode 100644 tests/test_bucket.py delete mode 100644 tests/test_scanner.py delete mode 100644 tests/test_service.py diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 717d2f4..0000000 --- a/.coveragerc +++ /dev/null @@ -1,4 +0,0 @@ -[run] -omit = - venv* - tests/TestUtils.py \ No newline at end of file diff --git a/.dev/docker-compose.yml b/.dev/docker-compose.yml new file mode 100644 index 0000000..88ea869 --- /dev/null +++ b/.dev/docker-compose.yml @@ -0,0 +1,38 @@ +version: "2.1" + +services: + dev_mq: + image: rabbitmq:3-management + hostname: dev_mq + container_name: dev_mq + healthcheck: + test: rabbitmq-diagnostics -q ping + interval: 2s + timeout: 3s + retries: 10 + ports: + - 5672:5672 + - 15672:15672 +# volumes: +# - "rabbitmq_data:/var/lib/rabbitmq" + environment: + - ALLOW_ANONYMOUS_LOGIN=yes + app_dev: + image: golang:1.18-alpine + container_name: app_dev + volumes: + - $PWD:/app + entrypoint: ["tail", "-f", "/dev/null"] + db_dev: + image: postgres:12-alpine + container_name: db_dev + volumes: + - db_dev:/var/lib/postgresql/data + ports: + - "5432:5432" + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: example + +volumes: + db_dev: \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 0000000..46082b8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,74 @@ +name: Bug Report +description: Report a bug in S3Scanner +labels: + - "bug" + - "triage" +body: + - type: checkboxes + attributes: + label: Existing Issues + description: Please search the existing issues before creating a new one + options: + - label: 
I've searched the [current issues](https://github.com/sa7mon/S3Scanner/issues) + required: true + + - type: input + attributes: + label: Command + description: Paste the command you ran that produced the issue. + placeholder: s3scanner -bucket b123 + validations: + required: true + + - type: textarea + attributes: + label: Observed behavior (what happened?) + description: Please provide a brief description of the bug in 1-2 sentences. + validations: + required: true + + - type: textarea + attributes: + label: Expected behaviour + description: Please describe precisely what you'd expect to happen. Be specific. + validations: + required: true + + - type: textarea + attributes: + label: Debug output + description: Please re-run your command with the `--verbose` flag and paste the results here + validations: + required: true + + - type: textarea + attributes: + label: OS Info + description: Operating system version + placeholder: | + Windows 11 Pro + Ubuntu 22.04 LTS + validations: + required: false + + - type: textarea + attributes: + label: Config file + description: If you are using any flags that require use of config file (`-provider custom`, `-db`, `-mq`, etc), please paste the contents of your config file here. + placeholder: | + db: + uri: "postgresql://user:pass@db:5432/postgres" + providers: + custom: + insecure: false + endpoint_format: "https://$REGION.myprovider.com" + regions: + - "us-east-1" + address_style: "path" + validations: + required: false + + - type: textarea + attributes: + label: Additional info + description: Please provide any additional information that seem useful. 
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml new file mode 100644 index 0000000..bfd2351 --- /dev/null +++ b/.github/workflows/docker-publish.yml @@ -0,0 +1,87 @@ +name: Docker + +on: + release: + types: [published] + +env: + # Use docker.io for Docker Hub if empty + REGISTRY: ghcr.io + # github.repository as / + IMAGE_NAME: ${{ github.repository }} + +jobs: + build: + + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + # This is used to complete the identity challenge + # with sigstore/fulcio when running outside of PRs. + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Install the cosign tool except on PR + # https://github.com/sigstore/cosign-installer + - name: Install cosign + if: github.event_name != 'pull_request' + uses: sigstore/cosign-installer@f3c664df7af409cb4873aa5068053ba9d61a57b6 #v2.6.0 + with: + cosign-release: 'v1.13.1' + + + # Workaround: https://github.com/docker/build-push-action/issues/461 + - name: Setup Docker buildx + uses: docker/setup-buildx-action@79abd3f86f79a9d68a23c75a09a9a85889262adf + + # Login against a Docker registry except on PR + # https://github.com/docker/login-action + - name: Log into registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Extract metadata (tags, labels) for Docker + # https://github.com/docker/metadata-action + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + # Build and push Docker image with Buildx (don't push on PR) + # https://github.com/docker/build-push-action + - name: Build and push Docker image + id: build-and-push + uses: 
docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a + with: + context: . + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + file: packaging/docker/Dockerfile + build-args: | + VERSION=${{ github.event.release.tag_name }} + + # Sign the resulting Docker image digest except on PRs. + # This will only write to the public Rekor transparency log when the Docker + # repository is public to avoid leaking data. If you would like to publish + # transparency data even for private images, pass --force to cosign below. + # https://github.com/sigstore/cosign + - name: Sign the published Docker image + if: ${{ github.event_name != 'pull_request' }} + env: + COSIGN_EXPERIMENTAL: "true" + # This step uses the identity token to provision an ephemeral certificate + # against the sigstore community Fulcio instance. + run: echo "${{ steps.meta.outputs.tags }}" | xargs -I {} cosign sign {}@${{ steps.build-and-push.outputs.digest }} diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000..823fa67 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,55 @@ +name: golangci-lint +on: +# push: +# branches: +# - master +# - main + pull_request: + +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: '1.20' + cache: false + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + # Require: The version of golangci-lint to use. + # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version. 
+ # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit. + version: v1.53 + + # Optional: working directory, useful for monorepos + # working-directory: somedir + + # Optional: golangci-lint command line arguments. + # + # Note: By default, the `.golangci.yml` file should be at the root of the repository. + # The location of the configuration file can be changed by using `--config=` + # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 + + # Optional: show only new issues if it's a pull request. The default value is `false`. + # only-new-issues: true + + # Optional: if set to true, then all caching functionality will be completely disabled, + # takes precedence over all other caching options. + # skip-cache: true + + # Optional: if set to true, then the action won't cache or restore ~/go/pkg. + # skip-pkg-cache: true + + # Optional: if set to true, then the action won't cache or restore ~/.cache/go-build. + # skip-build-cache: true + + # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. 
+ # install-mode: "goinstall" \ No newline at end of file diff --git a/.github/workflows/pr_test-coverage.yml b/.github/workflows/pr_test-coverage.yml new file mode 100644 index 0000000..e45f44e --- /dev/null +++ b/.github/workflows/pr_test-coverage.yml @@ -0,0 +1,54 @@ +name: Test Coverage + +on: [ pull_request ] + +jobs: + pr-test: + if: ${{ github.event_name }} == 'pull_request' + permissions: + pull-requests: write + contents: read + runs-on: ubuntu-latest + services: + postgres: + image: postgres:12-alpine + env: + POSTGRES_PASSWORD: example + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - "5432:5432" + mq: + image: rabbitmq:3-management + ports: + - 5672:5672 + - 15672:15672 + env: + ALLOW_ANONYMOUS_LOGIN: "yes" + steps: + - uses: actions/checkout@v3 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.18' + - name: Test + run: TEST_DB=1 TEST_MQ=1 go test ./... -coverprofile=cover.out + - name: Install dependencies + run: go install github.com/axw/gocov/gocov@v1.1.0 && go install github.com/AlekSi/gocov-xml@v1.1.0 + - run: gocov convert cover.out | gocov-xml > coverage.xml + - name: Code Coverage Summary + uses: irongut/CodeCoverageSummary@v1.3.0 + with: + filename: coverage.xml + badge: true + format: markdown + output: both + - name: Add Coverage PR Comment + uses: marocchino/sticky-pull-request-comment@v2.6.2 + with: + recreate: true + path: code-coverage-results.md + number: ${{ github.event.number }} \ No newline at end of file diff --git a/.github/workflows/pr_test-matrix.yml b/.github/workflows/pr_test-matrix.yml new file mode 100644 index 0000000..f756dc3 --- /dev/null +++ b/.github/workflows/pr_test-matrix.yml @@ -0,0 +1,56 @@ +name: Unit & Integration Tests + +on: [ pull_request ] + +jobs: + linux-matrix: + strategy: + matrix: + go: [ 1.18, 1.19, "1.20" ] + os: [ubuntu-latest] + runs-on: ${{ matrix.os }} + services: + postgres: + image: postgres:12-alpine + 
env: + POSTGRES_PASSWORD: example + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - "5432:5432" + mq: + image: rabbitmq:3-management + ports: + - 5672:5672 + - 15672:15672 + env: + ALLOW_ANONYMOUS_LOGIN: "yes" + steps: + - uses: actions/checkout@v3 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go }} + - name: Test + run: TEST_DB=1 TEST_MQ=1 go test ./... + - name: Build + run: go build -v ./... + windows-matrix: + strategy: + matrix: + go: [ 1.18, 1.19, "1.20" ] + os: [ windows-latest ] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go }} + - name: Test + run: go test ./... + - name: Build + run: go build -v ./... \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..512706a --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,28 @@ +name: releases + +on: + push: + tags: + - '*' + +permissions: + contents: write + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - run: git fetch --force --tags + - uses: actions/setup-go@v4 + with: + go-version: stable + - uses: goreleaser/goreleaser-action@v4 + with: + distribution: goreleaser + version: latest + args: release -f ./packaging/.goreleaser.yaml --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 78c43e7..d44e62f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,24 +1,10 @@ -# IDE .idea/ -.vscode/ - -# Virtualenv -venv*/ - -# Cache -__pycache__ -*.pyc -.cache/ - -# Testing -.pytest_cache -test/ -.coverage -htmlcov/ -temp-*/ -buckets.txt - -# Pip build -build/ +*.log +logs/ +.DS_Store +*.sql.gz +data/ +bin/* +*.out dist/ -S3Scanner.egg-info/ \ No newline at end of file +packaging/dist/ diff --git a/.travis.yml 
b/.travis.yml deleted file mode 100644 index 1d3c68e..0000000 --- a/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: python -jobs: - include: - - python: '3.6' - - python: '3.7' - - python: '3.8' - - python: '3.9' - env: - - secure: "JShcKAHn4y57mTHDIV5+8dTRjE2cREJSswXAxhFf8jha+r58zF/uBfgXapzNh9u+dpvbVjF/N0/KxREubMTd4fduYTsxMOXyqENHnq7kVmRK6HXAAnM75JZzl1sonlHsIHHXxv45SuwYWX/fk6aMeBmkGukuvM8DGi4BEBzv0CnzEUmHlb5ZPKmQteemjhbn2d3yKPKagcieeDbSRhevGKPPmfnt0TqzpF/xrbtIL05yC+038Tesa0mZqV/HBrfZgSEtcMydIhbszhDjBwC3nzhhiC8AQJ8JGRPqR3nfTZRrHA0QMT3hr8XGpLouphvpDDwiotmOTRsGiBfONX+b2JDTx989eswIXmBsdua3pxjUNuLVTiRjl63+6zJSvT3mrJ1cZMRJPvbqYTY+mvckSMeDQv4oFZeD8QCD+z8zLa39GYfKBnapo0s+rvvxYyiVNZ9HQ1MExJyVleJWRMlmKtuNUHzHaCq+B8omcGZxhEfX4dVQ/RHwNRwkKbdUKOZy4muardhYorhVO+eLt4+bAipk8BEAXvIBwaAqbIN3+01a8TbTGKkxJUTllkf2Y7wFeF6IPtxvfpJ6Bgj4BNSpDrR/eoyIodG42J6Qdl4aK6/RQbI9vzUQ8yoSxQxzHHFZeclU2Qe5KZem3ztbexkiYB+Mv7oV/rr1LGixvbBsLzI=" - - secure: "SLjBJJsmtbHZGwmZHHJTYk0qmlS6kcbur1SMM70+n/UEp55hAFo6Ae/n75G4RR0bVIVkgrJp3ZE9V/wZKVbOOaUaepyjZXfgRBjL/zBYjFgxgQhrLis3Bg+lR6qBoWifm/mfM+mUqHLDqelSbvpgE/oZLeM9vuYBYvI9LIZSeM3C6m+4ytKoayUgggq87lQRDr9d/YPpAZEYnT13mAqkd3zbovjLAEtALx6BOg5xZv7bHCx5WS5gz79CA+jFRjWU9q4ng5zyCERWFOeTcCYAHjxKXYOJYew8N/NYA2PFd+BiedQRHuIJAHg/auofchBewmtfHG6rgMZSuE+jzl1aB344zwpVJocR09kBXi6tk9KiASTrZMSHf31LEJFwciFnSsCTVQG7kVL0NdjZBEGO7zE1u5c4dxYEctDVvCz/kmH7hl70zajot9cYihh8VLvLwpGBYepTf2a+vhRwdxeZ81KuI3SeOBqNJwyT6wZMw9AfEOmK9LqyS9vBqusujwua+W/DDeqYFo99HkS2uMdX4/IfAB5DDhVakMrff8rrUuf1K1H6rtV7qckOHDET+wdjqymZkPD/mjjW+ibAattls4cZU3I7NRVsnNiZmXAT410A92y6JEiZPuG1djz/57yrqQ3S4KzVhgBq1t5WJRc84dUKCbYnwY4fDL4BH8lkync=" - - name: "Python: 3.6" - os: windows - language: shell - before_install: - - choco install python --version=3.6.8 - - python -m pip install -U pip setuptools - env: PATH=/c/Python36:/c/Python36/Scripts:$PATH - cache: - directories: - - $LOCALAPPDATA/pip/Cache - -cache: pip -install: -- pip install -r requirements.txt -script: -- pytest -s -notifications: - email: false 
diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index aee9262..0000000 --- a/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM python:3.8-alpine -COPY . /app -WORKDIR /app -RUN pip install boto3 -RUN pip install . -ENTRYPOINT ["python", "-m", "S3Scanner"] \ No newline at end of file diff --git a/LICENSE b/LICENSE index 0db5f5f..e8cf467 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2019 Dan Salmon +Copyright (c) 2023 Dan Salmon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4cffb25 --- /dev/null +++ b/Makefile @@ -0,0 +1,17 @@ +APP := s3scanner +DOCKER_IMAGE := hothamandcheese/s3scanner +VERSION := $(shell git describe --tags --abbrev=0) +COMMIT := $(shell git rev-parse --short HEAD) +BUILD_DATE := `date +%FT%T%z` + +dev: + docker compose -f .dev/docker-compose.yml up -d + +docker-image: + docker build -t $(DOCKER_IMAGE):$(VERSION) -f packaging/docker/Dockerfile . + +test: + go test ./... + +test-integration: + TEST_DB=1 TEST_MQ=1 go test ./... diff --git a/README.md b/README.md index 6ef6756..2438990 100644 --- a/README.md +++ b/README.md @@ -1,111 +1,305 @@ -# S3Scanner -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Build Status](https://travis-ci.org/sa7mon/S3Scanner.svg?branch=master)](https://travis-ci.org/sa7mon/S3Scanner) +

+S3Scanner +

+ +

+ + + + +

+

+Features - Usage - Quick Start - Installation - Discuss +

+
+A tool to find open S3 buckets in AWS or other cloud providers: + +- AWS +- DigitalOcean +- DreamHost +- GCP +- Linode +- Custom + +demo + +# Features -A tool to find open S3 buckets and dump their contentsπŸ’§ +* ⚑️ Multi-threaded scanning +* πŸ”­ Supports many built-in S3 storage providers or custom +* πŸ•΅οΈβ€β™€οΈ Scans all bucket permissions to find misconfigurations +* πŸ’Ύ Save results to Postgres database +* πŸ‡ Connect to RabbitMQ for automated scanning at scale +* 🐳 Docker support - +# Usage -## Usage -
-usage: s3scanner [-h] [--version] [--threads n] [--endpoint-url ENDPOINT_URL] [--endpoint-address-style {path,vhost}] [--insecure] {scan,dump} ...
+```
+INPUT: (1 required)
+  -bucket        string  Name of bucket to check.
+  -bucket-file   string  File of bucket names to check.
+  -mq                    Connect to RabbitMQ to get buckets. Requires config file key "mq". Default: "false"
 
-s3scanner: Audit unsecured S3 buckets
-           by Dan Salmon - github.com/sa7mon, @bltjetpack
+OUTPUT:
+  -db       Save results to a Postgres database. Requires config file key "db.uri". Default: "false"
+  -json     Print logs to stdout in JSON format instead of human-readable. Default: "false"
 
-optional arguments:
-  -h, --help            show this help message and exit
-  --version             Display the current version of this tool
-  --threads n, -t n     Number of threads to use. Default: 4
-  --endpoint-url ENDPOINT_URL, -u ENDPOINT_URL
-                        URL of S3-compliant API. Default: https://s3.amazonaws.com
-  --endpoint-address-style {path,vhost}, -s {path,vhost}
-                        Address style to use for the endpoint. Default: path
-  --insecure, -i        Do not verify SSL
+OPTIONS:
+  -enumerate           Enumerate bucket objects (can be time-consuming). Default: "false"
+  -provider    string  Object storage provider: aws, custom, digitalocean, dreamhost, gcp, linode - custom requires config file. Default: "aws"
+  -threads     int     Number of threads to scan with. Default: "4"
 
-mode:
-  {scan,dump}           (Must choose one)
-    scan                Scan bucket permissions
-    dump                Dump the contents of buckets
-
+DEBUG: + -verbose Enable verbose logging. Default: "false" + -version Print version Default: "false" -## πŸš€ Support +If config file is required these locations will be searched for config.yml: "." "/etc/s3scanner/" "$HOME/.s3scanner/" +``` + +# πŸš€ Support If you've found this tool useful, please consider donating to support its development. You can find sponsor options on the side of this repo page or in [FUNDING.yml](.github/FUNDING.yml)

Huge thank you to tines for being an ongoing sponsor of this project.

- -## Installation +# Quick Start + +Scan AWS for bucket names listed in a file, enumerate all objects + ```shell + $ s3scanner -bucket-file names.txt -enumerate + ``` + +Scan a bucket in GCP, enumerate all objects, and save results to database + ```shell + $ s3scanner -provider gcp -db -bucket my-bucket -enumerate + ``` + +# Installation + +via Docker ```shell -pip3 install s3scanner +docker run --rm -it ghcr.io/sa7mon/s3scanner:latest -bucket my-bucket ``` -or via Docker: +from source ```shell -docker build . -t s3scanner:latest -docker run --rm s3scanner:latest scan --bucket my-buket +git clone git@github.com:sa7mon/S3Scanner.git && cd S3Scanner +go build -o s3scanner . +./s3scanner -bucket my-bucket +``` + +# Using + +## Input + +`s3scanner` requires exactly one type of input: `-bucket`, `-bucket-file`, or `-mq`. + +``` +INPUT: (1 required) + -bucket string Name of bucket to check. + -bucket-file string File of bucket names to check. + -mq Connect to RabbitMQ to get buckets. Requires config file key "mq". 
Default: "false" ``` -or from source: +*`-bucket`* +------------ + +Scan a single bucket ```shell -git clone git@github.com:sa7mon/S3Scanner.git -cd S3Scanner -pip3 install -r requirements.txt -python3 -m S3Scanner +s3scanner -bucket secret_uploads ``` -## Features +*`-bucket-file`* +---------------- +Scans every bucket name listed in file -* ⚑️ Multi-threaded scanning -* πŸ”­ Supports tons of S3-compatible APIs -* πŸ•΅οΈβ€β™€οΈ Scans all bucket permissions to find misconfigurations -* πŸ’Ύ Dump bucket contents to a local folder -* 🐳 Docker support +``` +s3scanner -bucket-file names.txt +``` +where `names.txt` contains one bucket name per line -## Examples +``` +$ cat names.txt +bucket123 +assets +image-uploads +``` -* Scan AWS buckets listed in a file with 8 threads - ```shell - $ s3scanner --threads 8 scan --buckets-file ./bucket-names.txt - ``` -* Scan a bucket in Digital Ocean Spaces - ```shell - $ s3scanner --endpoint-url https://sfo2.digitaloceanspaces.com scan --bucket my-bucket - ``` -* Dump a single AWS bucket - ```shell - $ s3scanner dump --bucket my-bucket-to-dump - ``` -* Scan a single Dreamhost Objects bucket which uses the vhost address style and an invalid SSL cert - ```shell - $ s3scanner --endpoint-url https://objects.dreamhost.com --endpoint-address-style vhost --insecure scan --bucket my-bucket - ``` +*`-mq`* +------- -## S3-compatible APIs +Connects to a RabbitMQ server and consumes messages containing bucket names to scan. -`S3Scanner` can scan and dump buckets in S3-compatible APIs services other than AWS by using the -`--endpoint-url` argument. Depending on the service, you may also need the `--endpoint-address-style` -or `--insecure` arguments as well. 
+``` +s3scanner -mq +``` -Some services have different endpoints corresponding to different regions +Messages should be JSON-encoded [`Bucket`](https://github.com/sa7mon/s3scanner/blob/main/bucket/bucket.go) objects - refer to [`mqingest`](https://github.com/sa7mon/s3scanner/blob/main/cmd/mqingest/mqingest.go) for a Golang publishing example. -**Note:** `S3Scanner` currently only supports scanning for anonymous user permissions of non-AWS services +`-mq` requires the `mq.uri` and `mq.queue_name` config file keys. See Config File section for example. + +## Output + +``` +OUTPUT: + -db Save results to a Postgres database. Requires config file key "db.uri". Default: "false" + -json Print logs to stdout in JSON format instead of human-readable. Default: "false" +``` + +*`-db`* +---------- + +Saves all scan results to a PostgreSQL database + +```shell +s3scanner -bucket images -db +``` + +* Requires the `db.uri` config file key. See Config File section for example. +* If using `-db`, results will also be printed to the console if using `-json` or the default human-readable output mode. +* `s3scanner` runs Gorm's [Auto Migration](https://gorm.io/docs/migration.html#Auto-Migration) feature each time it connects two the database. If +the schema already has tables with names Gorm expects, it may change these tables' structure. It is recommended to create a Postgres schema dedicated to `s3scanner` results. + +*`-json`* +---------- + +Instead of outputting scan results to console in human-readable format, output machine-readable JSON. -| Service | Example Endpoint | Address Style | Insecure ? 
| -|---------|------------------|:-------------:|:----------:| -| DigitalOcean Spaces (SFO2 region) | https://sfo2.digitaloceanspaces.com | path | No | -| Dreamhost | https://objects.dreamhost.com | vhost | Yes | -| Linode Object Storage (eu-central-1 region) | https://eu-central-1.linodeobjects.com | vhost | No | -| Scaleway Object Storage (nl-ams region) | https://s3.nl-ams.scw.cloud | path | No | -| Wasabi Cloud Storage | http://s3.wasabisys.com/ | path | Yes | +```shell +s3scanner -bucket images -json +``` + +This will print one JSON object per line to the console, which can then be piped to `jq` or other tools that accept JSON input. + +**Example**: Print bucket name and region for all buckets that exist + +```shell +$ s3scanner -bucket-file names.txt -json | jq -r '. | select(.bucket.exists==1) | [.bucket.name, .bucket.region] | join(" - ")' +10000 - eu-west-1 +10000.pizza - ap-southeast-1 +images_staging - us-west-2 +``` + +## Options + +``` +OPTIONS: + -enumerate Enumerate bucket objects (can be time-consuming). Default: "false" + -provider string Object storage provider: aws, custom, digitalocean, dreamhost, gcp, linode - custom requires config file. Default: "aws" + -threads int Number of threads to scan with. Default: "4" +``` + +*`-enumerate`* +-------------- + +Enumerate all objects stored in bucket. By default, `s3scanner` will only check permissions of buckets. +```shell +s3scanner -bucket attachments -enumerate +``` + +* **Note:** This can take a long time if there are a large number of objects stored. +* When enumerating, `s3scanner` will request "pages" of 1,000 objects. If there are more than 5,000 pages of objects, it will skip the rest. + +*`-provider`* +------------- -πŸ“š Current status of non-AWS APIs can be found [in the project wiki](https://github.com/sa7mon/S3Scanner/wiki/S3-Compatible-APIs) +Name of storage provider to use when checking buckets. 
+ +```shell +s3scanner -bucket assets -provider gcp +``` + +* Use "custom" when targeting a currently unsupported or local network storage provider. +* "custom" provider requires config file keys under `providers.custom` listed in the Config File section. + +*`-threads`* +------------ + +Number of threads to scan with. + +```shell +s3scanner -bucket secret_docs -threads 8 +``` -## Interpreting Results +* Increasing threads will increase the number of buckets being scanned simultaneously, but will not speed up object enumeration. Enumeration is currently single-threaded per bucket. + +## Debug + +``` +DEBUG: + -verbose Enable verbose logging. Default: "false" + -version Print version Default: "false" +``` + +*`-verbose`* +------------ + +Enables verbose logging of debug messages. This option will produce a lot of logs and is not recommended to use unless filing a bug report. + +```shell +s3scanner -bucket spreadsheets -verbose +``` + +*`-version`* +------------ + +Print the version info and exit. + +```shell +s3scanner -version +``` + +* Will print `dev` if compiled from source. + +# Config File + +If using flags that require config options, `s3scanner` will search for `config.yml` in: + +* (current directory) +* `/etc/s3scanner/` +* `$HOME/.s3scanner/` + +```yaml +# Required by -db +db: + uri: "postgresql://user:pass@db.host.name:5432/schema_name" + +# Required by -mq +mq: + queue_name: "aws" + uri: "amqp://user:pass@localhost:5672" + +# providers.custom required by `-provider custom` +# address_style - Addressing style used by endpoints. +# type: string +# values: "path" or "vhost" +# endpoint_format - Format of endpoint URLs. 
Should contain '$REGION' as placeholder for region name +# type: string +# insecure - Ignore SSL errors +# type: boolean +# regions must contain at least one option +providers: + custom: + address_style: "path" + endpoint_format: "https://$REGION.vultrobjects.com" + insecure: false + regions: + - "ewr1" +``` + +When `s3scanner` parses the config file, it will take the `endpoint_format` and replace `$REGION` for all `regions` listed to create a list of endpoint URLs. + +# S3 compatible APIs + +**Note:** `S3Scanner` currently only supports scanning for anonymous user permissions of non-AWS services + +πŸ“š More information on non-AWS APIs can be found [in the project wiki](https://github.com/sa7mon/S3Scanner/wiki/S3-Compatible-APIs). + +## Permissions This tool will attempt to get all available information about a bucket, but it's up to you to interpret the results. @@ -116,22 +310,14 @@ This tool will attempt to get all available information about a bucket, but it's * Read ACP - Read all Access Control Policies attached to bucket * Write ACP - Write Access Control Policies to bucket * Full Control - All above permissions - + Any or all of these permissions can be set for the 2 main user groups: * Authenticated Users * Public Users (those without AWS credentials set) * Individual users/groups (out of scope of this tool) - -**What this means:** Just because a bucket doesn't allow reading/writing ACLs doesn't mean you can't read/write files in the bucket. Conversely, you may be able to list ACLs but not read/write to the bucket -## Contributors -* [Ohelig](https://github.com/Ohelig) -* [vysecurity](https://github.com/vysecurity) -* [janmasarik](https://github.com/janmasarik) -* [alanyee](https://github.com/alanyee) -* [klau5dev](https://github.com/klau5dev) -* [hipotermia](https://github.com/hipotermia) +**What this means:** Just because a bucket doesn't allow reading/writing ACLs doesn't mean you can't read/write files in the bucket. 
Conversely, you may be able to list ACLs but not read/write to the bucket -## License +# License MIT \ No newline at end of file diff --git a/S3Scanner/S3Bucket.py b/S3Scanner/S3Bucket.py deleted file mode 100644 index 76bc79a..0000000 --- a/S3Scanner/S3Bucket.py +++ /dev/null @@ -1,162 +0,0 @@ -import re -from enum import Enum - - -class Permission(Enum): - ALLOWED = 1, - DENIED = 0, - UNKNOWN = -1 - - -class BucketExists(Enum): - YES = 1, - NO = 0, - UNKNOWN = -1 - - -def bytes_to_human_readable(bytes_in, suffix='B'): - """ - Convert number of bytes to a "human-readable" format. i.e. 1024 -> 1KB - Shamelessly copied from: https://stackoverflow.com/a/1094933/2307994 - - :param int bytes_in: Number of bytes to convert - :param str suffix: Suffix to convert to - i.e. B/KB/MB - :return: str human-readable string - """ - for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: - if abs(bytes_in) < 1024.0: - return "%3.1f%s%s" % (bytes_in, unit, suffix) - bytes_in /= 1024.0 - return "%.1f%s%s" % (bytes_in, 'Yi', suffix) - - -class S3BucketObject: - """ - Represents an object stored in a bucket. 
- __eq__ and __hash__ are implemented to take full advantage of the set() deduplication - __lt__ is implemented to enable object sorting - """ - def __init__(self, size, last_modified, key): - self.size = size - self.last_modified = last_modified - self.key = key - - def __eq__(self, other): - return self.key == other.key - - def __hash__(self): - return hash(self.key) - - def __lt__(self, other): - return self.key < other.key - - def __repr__(self): - return "Key: %s, Size: %s, LastModified: %s" % (self.key, str(self.size), str(self.last_modified)) - - def get_human_readable_size(self): - return bytes_to_human_readable(self.size) - - -class S3Bucket: - """ - Represents a bucket which holds objects - """ - exists = BucketExists.UNKNOWN - objects = set() # A collection of S3BucketObject - bucketSize = 0 - objects_enumerated = False - foundACL = None - - def __init__(self, name): - """ - Constructor method - - :param str name: Name of bucket - :raises ValueError: If bucket name is invalid according to `_check_bucket_name()` - """ - check = self._check_bucket_name(name) - if not check['valid']: - raise ValueError("Invalid bucket name") - - self.name = check['name'] - - self.AuthUsersRead = Permission.UNKNOWN - self.AuthUsersWrite = Permission.UNKNOWN - self.AuthUsersReadACP = Permission.UNKNOWN - self.AuthUsersWriteACP = Permission.UNKNOWN - self.AuthUsersFullControl = Permission.UNKNOWN - - self.AllUsersRead = Permission.UNKNOWN - self.AllUsersWrite = Permission.UNKNOWN - self.AllUsersReadACP = Permission.UNKNOWN - self.AllUsersWriteACP = Permission.UNKNOWN - self.AllUsersFullControl = Permission.UNKNOWN - - def _check_bucket_name(self, name): - """ - Checks to make sure bucket names input are valid according to S3 naming conventions - - :param str name: Name of bucket to check - :return: dict: ['valid'] - bool: whether or not the name is valid, ['name'] - str: extracted bucket name - """ - bucket_name = "" - # Check if bucket name is valid and determine the format - 
if ".amazonaws.com" in name: # We were given a full s3 url - bucket_name = name[:name.rfind(".s3")] - elif ":" in name: # We were given a bucket in 'bucket:region' format - bucket_name = name.split(":")[0] - else: # We were given a regular bucket name - bucket_name = name - - # Bucket names can be 3-63 (inclusively) characters long. - # Bucket names may only contain lowercase letters, numbers, periods, and hyphens - pattern = r'(?=^.{3,63}$)(?!^(\d+\.)+\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)' - return {'valid': bool(re.match(pattern, bucket_name)), 'name': bucket_name} - - def add_object(self, obj): - """ - Adds object to bucket. Updates the `objects` and `bucketSize` properties of the bucket - - :param S3BucketObject obj: Object to add to bucket - :return: None - """ - self.objects.add(obj) - self.bucketSize += obj.size - - def get_human_readable_size(self): - return bytes_to_human_readable(self.bucketSize) - - def get_human_readable_permissions(self): - """ - Returns a human-readable string of allowed permissions for this bucket - i.e. 
"AuthUsers: [Read | WriteACP], AllUsers: [FullControl]" - - :return: str: Human-readable permissions - """ - # Add AuthUsers permissions - authUsersPermissions = [] - if self.AuthUsersFullControl == Permission.ALLOWED: - authUsersPermissions.append("FullControl") - else: - if self.AuthUsersRead == Permission.ALLOWED: - authUsersPermissions.append("Read") - if self.AuthUsersWrite == Permission.ALLOWED: - authUsersPermissions.append("Write") - if self.AuthUsersReadACP == Permission.ALLOWED: - authUsersPermissions.append("ReadACP") - if self.AuthUsersWriteACP == Permission.ALLOWED: - authUsersPermissions.append("WriteACP") - # Add AllUsers permissions - allUsersPermissions = [] - if self.AllUsersFullControl == Permission.ALLOWED: - allUsersPermissions.append("FullControl") - else: - if self.AllUsersRead == Permission.ALLOWED: - allUsersPermissions.append("Read") - if self.AllUsersWrite == Permission.ALLOWED: - allUsersPermissions.append("Write") - if self.AllUsersReadACP == Permission.ALLOWED: - allUsersPermissions.append("ReadACP") - if self.AllUsersWriteACP == Permission.ALLOWED: - allUsersPermissions.append("WriteACP") - return f"AuthUsers: [{', '.join(authUsersPermissions)}], AllUsers: [{', '.join(allUsersPermissions)}]" diff --git a/S3Scanner/S3Service.py b/S3Scanner/S3Service.py deleted file mode 100644 index 2105703..0000000 --- a/S3Scanner/S3Service.py +++ /dev/null @@ -1,485 +0,0 @@ -""" - This will be a service that the client program will instantiate to then call methods - passing buckets -""" -from boto3 import client, session as boto_session # TODO: Limit import to just boto3.client, probably -from S3Scanner.S3Bucket import S3Bucket, BucketExists, Permission, S3BucketObject -from botocore.exceptions import ClientError -import botocore.session -from botocore import UNSIGNED -from botocore.client import Config -import datetime -from S3Scanner.exceptions import AccessDeniedException, InvalidEndpointException, BucketMightNotExistException -import pathlib 
-from concurrent.futures import ThreadPoolExecutor, as_completed -from functools import partial -from urllib3 import disable_warnings -import os - - -ALL_USERS_URI = 'uri=http://acs.amazonaws.com/groups/global/AllUsers' -AUTH_USERS_URI = 'uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers' - - -class S3Service: - def __init__(self, forceNoCreds=False, endpoint_url='https://s3.amazonaws.com', verify_ssl=True, - endpoint_address_style='path', profile='default'): - """ - Service constructor - - :param forceNoCreds: (Boolean) Force the service to not use credentials, even if the user has creds configured - :param endpoint_url: (String) URL of S3 endpoint to use. Must include http(s):// scheme - :param verify_ssl: (Boolean) Whether of not to verify ssl. Set to false if endpoint is http - :param endpoint_address_style: (String) Addressing style of the endpoint. Must be 'path' or 'vhost' - :returns None - """ - self.endpoint_url = endpoint_url - self.endpoint_address_style = 'path' if endpoint_address_style == 'path' else 'virtual' - use_ssl = True if self.endpoint_url.startswith('http://') else False - - if not verify_ssl: - disable_warnings() - - # DEBUG - # boto3.set_stream_logger(name='botocore') - - # Validate the endpoint if it's not the default of AWS - if self.endpoint_url != 'https://s3.amazonaws.com': - if not self.validate_endpoint_url(use_ssl, verify_ssl, endpoint_address_style): - raise InvalidEndpointException(message=f"Endpoint '{self.endpoint_url}' does not appear to be S3-compliant") - - # Check for AWS credentials - session = boto_session.Session() - if profile in session.available_profiles: # use provided profile, if it is availble to use - session = boto_session.Session(profile_name=profile) - else: - print(f"Error: profile \"{profile}\" not found in ~/.aws/credentials") - exit(1) - - if forceNoCreds or session.get_credentials() is None or session.get_credentials().access_key is None: - self.aws_creds_configured = False - self.s3_client = 
client('s3', - config=Config(signature_version=UNSIGNED, s3={'addressing_style': self.endpoint_address_style}, connect_timeout=3, - retries={'max_attempts': 2}), - endpoint_url=self.endpoint_url, use_ssl=use_ssl, verify=verify_ssl) - else: - self.aws_creds_configured = True - self.s3_client = session.client('s3', config=Config(s3={'addressing_style': self.endpoint_address_style}, connect_timeout=3, - retries={'max_attempts': 2}), - endpoint_url=self.endpoint_url, use_ssl=use_ssl, verify=verify_ssl) - - del session # No longer needed - - def check_bucket_exists(self, bucket): - """ - Checks if a bucket exists. Sets `exists` property of `bucket` - - :param S3Bucket bucket: Bucket to check - :raises ValueError: If `bucket` is not an s3Bucket object - :return: None - """ - if not isinstance(bucket, S3Bucket): - raise ValueError("Passed object was not type S3Bucket") - - bucket_exists = True - - try: - self.s3_client.head_bucket(Bucket=bucket.name) - except ClientError as e: - if e.response['Error']['Code'] == '404': - bucket_exists = False - - bucket.exists = BucketExists.YES if bucket_exists else BucketExists.NO - - def check_perm_read_acl(self, bucket): - """ - Check for the READACP permission on `bucket` by trying to get the bucket ACL - - :param S3Bucket bucket: Bucket to check permission of - :raises BucketMightNotExistException: If `bucket` existence hasn't been checked - :raises ClientError: If we encounter an unexpected ClientError from boto client - :return: None - """ - - if bucket.exists != BucketExists.YES: - raise BucketMightNotExistException() - - try: - bucket.foundACL = self.s3_client.get_bucket_acl(Bucket=bucket.name) - self.parse_found_acl(bucket) # If we can read ACLs, we know the rest of the permissions - except ClientError as e: - if e.response['Error']['Code'] == "AccessDenied" or e.response['Error']['Code'] == "AllAccessDisabled": - if self.aws_creds_configured: - bucket.AuthUsersReadACP = Permission.DENIED - else: - bucket.AllUsersReadACP = 
Permission.DENIED - else: - raise e - - def check_perm_read(self, bucket): - """ - Checks for the READ permission on the bucket by attempting to list the objects. - Sets the `AllUsersRead` and/or `AuthUsersRead` property of `bucket`. - - :param S3Bucket bucket: Bucket to check permission of - :raises BucketMightNotExistException: If `bucket` existence hasn't been checked - :raises ClientError: If we encounter an unexpected ClientError from boto client - :return: None - """ - if bucket.exists != BucketExists.YES: - raise BucketMightNotExistException() - - list_bucket_perm_allowed = True - try: - self.s3_client.list_objects_v2(Bucket=bucket.name, MaxKeys=0) # TODO: Compare this to doing a HeadBucket - except ClientError as e: - if e.response['Error']['Code'] == "AccessDenied" or e.response['Error']['Code'] == "AllAccessDisabled": - list_bucket_perm_allowed = False - else: - print(f"ERROR: Error while checking bucket {bucket.name}") - raise e - if self.aws_creds_configured: - # Don't mark AuthUsersRead as Allowed if it's only implicitly allowed due to AllUsersRead being allowed - # We only want to make AuthUsersRead as Allowed if that permission is explicitly set for AuthUsers - if bucket.AllUsersRead != Permission.ALLOWED: - bucket.AuthUsersRead = Permission.ALLOWED if list_bucket_perm_allowed else Permission.DENIED - else: - bucket.AllUsersRead = Permission.ALLOWED if list_bucket_perm_allowed else Permission.DENIED - - def check_perm_write(self, bucket): - """ - Check for WRITE permission by trying to upload an empty file to the bucket. - File is named the current timestamp to ensure we're not overwriting an existing file in the bucket. - - NOTE: If writing to bucket succeeds using an AuthUser, only mark AuthUsersWrite as Allowed if AllUsers are - Denied. Writing can succeed if AuthUsers are implicitly allowed due to AllUsers being allowed, but we only want - to mark AuthUsers as Allowed if they are explicitly granted. 
If AllUsersWrite is Allowed and the write is - successful by an AuthUser, we have no way of knowing if AuthUsers were granted permission explicitly - - :param S3Bucket bucket: Bucket to check permission of - :raises BucketMightNotExistException: If `bucket` existence hasn't been checked - :raises ClientError: If we encounter an unexpected ClientError from boto client - :return: None - """ - if bucket.exists != BucketExists.YES: - raise BucketMightNotExistException() - - timestamp_file = str(datetime.datetime.now().timestamp()) + '.txt' - - try: - # Try to create a new empty file with a key of the timestamp - self.s3_client.put_object(Bucket=bucket.name, Key=timestamp_file, Body=b'') - - if self.aws_creds_configured: - if bucket.AllUsersWrite != Permission.ALLOWED: # If AllUsers have Write permission, don't mark AuthUsers as Allowed - bucket.AuthUsersWrite = Permission.ALLOWED - else: - bucket.AuthUsersWrite = Permission.UNKNOWN - else: - bucket.AllUsersWrite = Permission.ALLOWED - - # Delete the temporary file - self.s3_client.delete_object(Bucket=bucket.name, Key=timestamp_file) - except ClientError as e: - if e.response['Error']['Code'] == "AccessDenied" or e.response['Error']['Code'] == "AllAccessDisabled": - if self.aws_creds_configured: - bucket.AuthUsersWrite = Permission.DENIED - else: - bucket.AllUsersWrite = Permission.DENIED - else: - raise e - - def check_perm_write_acl(self, bucket): - """ - Checks for WRITE_ACP permission by attempting to set an ACL on the bucket. - WARNING: Potentially destructive - make sure to run this check last as it will include all discovered - permissions in the ACL it tries to set, thus ensuring minimal disruption for the bucket owner. 
- - :param S3Bucket bucket: Bucket to check permission of - :raises BucketMightNotExistException: If `bucket` existence hasn't been checked - :raises ClientError: If we encounter an unexpected ClientError from boto client - :return: None - """ - if bucket.exists != BucketExists.YES: - raise BucketMightNotExistException() - - # TODO: See if there's a way to simplify this section - readURIs = [] - writeURIs = [] - readAcpURIs = [] - writeAcpURIs = [] - fullControlURIs = [] - - if bucket.AuthUsersRead == Permission.ALLOWED: - readURIs.append(AUTH_USERS_URI) - if bucket.AuthUsersWrite == Permission.ALLOWED: - writeURIs.append(AUTH_USERS_URI) - if bucket.AuthUsersReadACP == Permission.ALLOWED: - readAcpURIs.append(AUTH_USERS_URI) - if bucket.AuthUsersWriteACP == Permission.ALLOWED: - writeAcpURIs.append(AUTH_USERS_URI) - if bucket.AuthUsersFullControl == Permission.ALLOWED: - fullControlURIs.append(AUTH_USERS_URI) - - if bucket.AllUsersRead == Permission.ALLOWED: - readURIs.append(ALL_USERS_URI) - if bucket.AllUsersWrite == Permission.ALLOWED: - writeURIs.append(ALL_USERS_URI) - if bucket.AllUsersReadACP == Permission.ALLOWED: - readAcpURIs.append(ALL_USERS_URI) - if bucket.AllUsersWriteACP == Permission.ALLOWED: - writeAcpURIs.append(ALL_USERS_URI) - if bucket.AllUsersFullControl == Permission.ALLOWED: - fullControlURIs.append(ALL_USERS_URI) - - if self.aws_creds_configured: # Otherwise AWS will return "Request was missing a required header" - writeAcpURIs.append(AUTH_USERS_URI) - else: - writeAcpURIs.append(ALL_USERS_URI) - args = {'Bucket': bucket.name} - if len(readURIs) > 0: - args['GrantRead'] = ','.join(readURIs) - if len(writeURIs) > 0: - args['GrantWrite'] = ','.join(writeURIs) - if len(readAcpURIs) > 0: - args['GrantReadACP'] = ','.join(readAcpURIs) - if len(writeAcpURIs) > 0: - args['GrantWriteACP'] = ','.join(writeAcpURIs) - if len(fullControlURIs) > 0: - args['GrantFullControl'] = ','.join(fullControlURIs) - try: - self.s3_client.put_bucket_acl(**args) - if 
self.aws_creds_configured: - # Don't mark AuthUsersWriteACP as Allowed if it's due to implicit permission via AllUsersWriteACP - # Only mark it as allowed if the AuthUsers group is explicitly allowed - if bucket.AllUsersWriteACP != Permission.ALLOWED: - bucket.AuthUsersWriteACP = Permission.ALLOWED - else: - bucket.AuthUsersWriteACP = Permission.UNKNOWN - else: - bucket.AllUsersWriteACP = Permission.ALLOWED - except ClientError as e: - if e.response['Error']['Code'] == "AccessDenied" or e.response['Error']['Code'] == "AllAccessDisabled": - if self.aws_creds_configured: - bucket.AuthUsersWriteACP = Permission.DENIED - else: - bucket.AllUsersWriteACP = Permission.DENIED - else: - raise e - - def dump_bucket_multithread(self, bucket, dest_directory, verbose=False, threads=4): - """ - Takes a bucket and downloads all the objects to a local folder. - If the object exists locally and is the same size as the remote object, the object is skipped. - If the object exists locally and is a different size then the remote object, the local object is overwritten. - - :param S3Bucket bucket: Bucket whose contents we want to dump - :param str dest_directory: Folder to save the objects to. 
Must include trailing slash - :param bool verbose: Output verbose messages to the user - :param int threads: Number of threads to use while dumping - :return: None - """ - # TODO: Let the user choose whether or not to overwrite local files if different - - print(f"{bucket.name} | Dumping contents using 4 threads...") - func = partial(self.download_file, dest_directory, bucket, verbose) - - with ThreadPoolExecutor(max_workers=threads) as executor: - futures = { - executor.submit(func, obj): obj for obj in bucket.objects - } - - for future in as_completed(futures): - if future.exception(): - print(f"{bucket.name} | Download failed: {futures[future]} | {future.exception()}") - - print(f"{bucket.name} | Dumping completed") - - def download_file(self, dest_directory, bucket, verbose, obj): - """ - Download `obj` from `bucket` into `dest_directory` - - :param str dest_directory: Directory to store the object into. _Must_ end in a slash - :param S3Bucket bucket: Bucket to download the object from - :param bool verbose: Output verbose messages to the user - :param S3BucketObject obj: Object to downlaod - :return: None - """ - dest_file_path = pathlib.Path(os.path.normpath(os.path.join(dest_directory, obj.key))) - - if not self.is_safe_file_to_download(obj.key, dest_directory): - print(f"{bucket.name} | Skipping file {obj.key}. File references a parent directory.") - return - if dest_file_path.exists(): - if dest_file_path.stat().st_size == obj.size: - if verbose: - print(f"{bucket.name} | Skipping {obj.key} - already downloaded") - return - else: - if verbose: - print(f"{bucket.name} | Re-downloading {obj.key} - local size differs from remote") - else: - if verbose: - print(f"{bucket.name} | Downloading {obj.key}") - dest_file_path.parent.mkdir(parents=True, exist_ok=True) # Equivalent to `mkdir -p` - self.s3_client.download_file(bucket.name, obj.key, str(dest_file_path)) - - def enumerate_bucket_objects(self, bucket): - """ - Enumerate all the objects in a bucket. 
Sets the `BucketSize`, `objects`, and `objects_enumerated` properties - of `bucket`. - - :param S3Bucket bucket: Bucket to enumerate objects of - :raises Exception: If the bucket doesn't exist - :raises AccessDeniedException: If we are denied access to the bucket objects - :raises ClientError: If we encounter an unexpected ClientError from boto client - :return: None - """ - if bucket.exists == BucketExists.UNKNOWN: - self.check_bucket_exists(bucket) - if bucket.exists == BucketExists.NO: - raise Exception("Bucket doesn't exist") - - try: - for page in self.s3_client.get_paginator("list_objects_v2").paginate(Bucket=bucket.name): - if 'Contents' not in page: # No items in this bucket - bucket.objects_enumerated = True - return - for item in page['Contents']: - obj = S3BucketObject(key=item['Key'], last_modified=item['LastModified'], size=item['Size']) - bucket.add_object(obj) - except ClientError as e: - if e.response['Error']['Code'] == "AccessDenied" or e.response['Error']['Code'] == "AllAccessDisabled": - raise AccessDeniedException("AccessDenied while enumerating bucket objects") - bucket.objects_enumerated = True - - def is_safe_file_to_download(self, file_to_check, dest_directory): - """ - Check if bucket object would be saved outside of `dest_directory` if downloaded. - AWS allows object keys to include relative path characters like '../' which can lead to a - path traversal-like issue where objects get saved outside of the intended directory. - - :param string file_to_check: Bucket object key - :param string dest_directory: Path to directory to save file in - :return: bool - """ - file_to_check = os.path.abspath(os.path.join(dest_directory, file_to_check)) - safe_dir = os.path.abspath(dest_directory) - return os.path.commonpath([safe_dir]) == os.path.commonpath([safe_dir, file_to_check]) - - def parse_found_acl(self, bucket): - """ - Translate ACL grants into permission properties. 
If we were able to read the ACLs, we should be able to skip - manually checking most permissions - - :param S3Bucket bucket: Bucket whose ACLs we want to parse - :return: None - """ - if bucket.foundACL is None: - return - - if 'Grants' in bucket.foundACL: - for grant in bucket.foundACL['Grants']: - if grant['Grantee']['Type'] == 'Group': - if 'URI' in grant['Grantee'] and grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers': - # Permissions have been given to the AuthUsers group - if grant['Permission'] == 'FULL_CONTROL': - bucket.AuthUsersRead = Permission.ALLOWED - bucket.AuthUsersWrite = Permission.ALLOWED - bucket.AuthUsersReadACP = Permission.ALLOWED - bucket.AuthUsersWriteACP = Permission.ALLOWED - bucket.AuthUsersFullControl = Permission.ALLOWED - elif grant['Permission'] == 'READ': - bucket.AuthUsersRead = Permission.ALLOWED - elif grant['Permission'] == 'READ_ACP': - bucket.AuthUsersReadACP = Permission.ALLOWED - elif grant['Permission'] == 'WRITE': - bucket.AuthUsersWrite = Permission.ALLOWED - elif grant['Permission'] == 'WRITE_ACP': - bucket.AuthUsersWriteACP = Permission.ALLOWED - - elif 'URI' in grant['Grantee'] and grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers': - # Permissions have been given to the AllUsers group - if grant['Permission'] == 'FULL_CONTROL': - bucket.AllUsersRead = Permission.ALLOWED - bucket.AllUsersWrite = Permission.ALLOWED - bucket.AllUsersReadACP = Permission.ALLOWED - bucket.AllUsersWriteACP = Permission.ALLOWED - bucket.AllUsersFullControl = Permission.ALLOWED - elif grant['Permission'] == 'READ': - bucket.AllUsersRead = Permission.ALLOWED - elif grant['Permission'] == 'READ_ACP': - bucket.AllUsersReadACP = Permission.ALLOWED - elif grant['Permission'] == 'WRITE': - bucket.AllUsersWrite = Permission.ALLOWED - elif grant['Permission'] == 'WRITE_ACP': - bucket.AllUsersWriteACP = Permission.ALLOWED - - # All permissions not explicitly granted in the ACL are denied - 
# TODO: Simplify this - if bucket.AuthUsersRead == Permission.UNKNOWN: - bucket.AuthUsersRead = Permission.DENIED - - if bucket.AuthUsersWrite == Permission.UNKNOWN: - bucket.AuthUsersWrite = Permission.DENIED - - if bucket.AuthUsersReadACP == Permission.UNKNOWN: - bucket.AuthUsersReadACP = Permission.DENIED - - if bucket.AuthUsersWriteACP == Permission.UNKNOWN: - bucket.AuthUsersWriteACP = Permission.DENIED - - if bucket.AuthUsersFullControl == Permission.UNKNOWN: - bucket.AuthUsersFullControl = Permission.DENIED - - if bucket.AllUsersRead == Permission.UNKNOWN: - bucket.AllUsersRead = Permission.DENIED - - if bucket.AllUsersWrite == Permission.UNKNOWN: - bucket.AllUsersWrite = Permission.DENIED - - if bucket.AllUsersReadACP == Permission.UNKNOWN: - bucket.AllUsersReadACP = Permission.DENIED - - if bucket.AllUsersWriteACP == Permission.UNKNOWN: - bucket.AllUsersWriteACP = Permission.DENIED - - if bucket.AllUsersFullControl == Permission.UNKNOWN: - bucket.AllUsersFullControl = Permission.DENIED - - def validate_endpoint_url(self, use_ssl=True, verify_ssl=True, endpoint_address_style='path'): - """ - Verify the user-supplied endpoint URL is S3-compliant by trying to list a maximum of 0 keys from a bucket which - is extremely unlikely to exist. - - Note: Most S3-compliant services will return an error code of "NoSuchBucket". Some services which require auth - for most operations (like Minio) will return an error code of "AccessDenied" instead - - :param bool use_ssl: Whether or not the endpoint serves HTTP over SSL - :param bool verify_ssl: Whether or not to verify the SSL connection. - :param str endpoint_address_style: Addressing style of endpoint. 
Must be either 'path' or 'vhost' - :return: bool: Whether or not the server responded in an S3-compliant way - """ - - # We always want to verify the endpoint using no creds - # so if the s3_client has creds configured, make a new anonymous client - - addressing_style = 'virtual' if endpoint_address_style == 'vhost' else 'path' - - validation_client = client('s3', config=Config(signature_version=UNSIGNED, - s3={'addressing_style': addressing_style}, connect_timeout=3, - retries={'max_attempts': 0}), endpoint_url=self.endpoint_url, use_ssl=use_ssl, - verify=verify_ssl) - - non_existent_bucket = 's3scanner-' + str(datetime.datetime.now())[0:10] - try: - validation_client.list_objects_v2(Bucket=non_existent_bucket, MaxKeys=0) - except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchBucket' or e.response['Error']['Code'] == 'AccessDenied': - return True - return False - except botocore.exceptions.ConnectTimeoutError: - return False - - # If we get here, the bucket either existed (unlikely) or the server returned a 200 for some reason - return False diff --git a/S3Scanner/__init__.py b/S3Scanner/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/S3Scanner/__main__.py b/S3Scanner/__main__.py deleted file mode 100644 index c044880..0000000 --- a/S3Scanner/__main__.py +++ /dev/null @@ -1,246 +0,0 @@ -######### -# -# S3scanner - Audit unsecured S3 buckets -# -# Author: Dan Salmon (twitter.com/bltjetpack, github.com/sa7mon) -# Created: 6/19/17 -# License: MIT -# -######### - -import argparse -from os import path -from sys import exit -from .S3Bucket import S3Bucket, BucketExists, Permission -from .S3Service import S3Service -from concurrent.futures import ThreadPoolExecutor, as_completed -from .exceptions import InvalidEndpointException - -CURRENT_VERSION = '2.0.2' -AWS_ENDPOINT = 'https://s3.amazonaws.com' - - -# We want to use both formatter classes, so a custom class it is -class CustomFormatter(argparse.RawTextHelpFormatter, 
argparse.RawDescriptionHelpFormatter): - pass - - -def load_bucket_names_from_file(file_name): - """ - Load in bucket names from a text file - - :param str file_name: Path to text file - :return: set: All lines of text file - """ - buckets = set() - if path.isfile(file_name): - with open(file_name, 'r') as f: - for line in f: - line = line.rstrip() # Remove any extra whitespace - buckets.add(line) - return buckets - else: - print("Error: '%s' is not a file" % file_name) - exit(1) - - -def scan_single_bucket(s3service, anons3service, do_dangerous, bucket_name): - """ - Scans a single bucket for permission issues. Exists on its own so we can do multi-threading - - :param S3Service s3service: S3Service with credentials to use for scanning - :param S3Service anonS3Service: S3Service without credentials to use for scanning - :param bool do_dangerous: Whether or not to do dangerous checks - :param str bucket_name: Name of bucket to check - :return: None - """ - try: - b = S3Bucket(bucket_name) - except ValueError as ve: - if str(ve) == "Invalid bucket name": - print(f"{bucket_name} | bucket_invalid_name") - return - else: - print(f"{bucket_name} | {str(ve)}") - return - - # Check if bucket exists first - # Use credentials if configured and if we're hitting AWS. Otherwise, check anonymously - if s3service.endpoint_url == AWS_ENDPOINT: - s3service.check_bucket_exists(b) - else: - anons3service.check_bucket_exists(b) - - if b.exists == BucketExists.NO: - print(f"{b.name} | bucket_not_exist") - return - checkAllUsersPerms = True - checkAuthUsersPerms = True - - # 1. 
Check for ReadACP - anons3service.check_perm_read_acl(b) # Check for AllUsers - if s3service.aws_creds_configured and s3service.endpoint_url == AWS_ENDPOINT: - s3service.check_perm_read_acl(b) # Check for AuthUsers - - # If FullControl is allowed for either AllUsers or AnonUsers, skip the remainder of those tests - if b.AuthUsersFullControl == Permission.ALLOWED: - checkAuthUsersPerms = False - if b.AllUsersFullControl == Permission.ALLOWED: - checkAllUsersPerms = False - - # 2. Check for Read - if checkAllUsersPerms: - anons3service.check_perm_read(b) - if s3service.aws_creds_configured and checkAuthUsersPerms and s3service.endpoint_url == AWS_ENDPOINT: - s3service.check_perm_read(b) - - # Do dangerous/destructive checks - if do_dangerous: - # 3. Check for Write - if checkAllUsersPerms: - anons3service.check_perm_write(b) - if s3service.aws_creds_configured and checkAuthUsersPerms: - s3service.check_perm_write(b) - - # 4. Check for WriteACP - if checkAllUsersPerms: - anons3service.check_perm_write_acl(b) - if s3service.aws_creds_configured and checkAuthUsersPerms: - s3service.check_perm_write_acl(b) - - print(f"{b.name} | bucket_exists | {b.get_human_readable_permissions()}") - - -def main(): - # Instantiate the parser - parser = argparse.ArgumentParser(description='s3scanner: Audit unsecured S3 buckets\n' - ' by Dan Salmon - github.com/sa7mon, @bltjetpack\n', - prog='s3scanner', allow_abbrev=False, formatter_class=CustomFormatter) - # Declare arguments - parser.add_argument('--version', action='version', version=CURRENT_VERSION, - help='Display the current version of this tool') - parser.add_argument('--threads', '-t', type=int, default=4, dest='threads', help='Number of threads to use. Default: 4', - metavar='n') - parser.add_argument('--endpoint-url', '-u', dest='endpoint_url', - help='URL of S3-compliant API. 
Default: https://s3.amazonaws.com', - default='https://s3.amazonaws.com') - parser.add_argument('--endpoint-address-style', '-s', dest='endpoint_address_style', choices=['path', 'vhost'], - default='path', help='Address style to use for the endpoint. Default: path') - parser.add_argument('--insecure', '-i', dest='verify_ssl', action='store_false', help='Do not verify SSL') - parser.add_argument('--profile', '-p', dest='aws_profile',default='default', help='AWS profile to use (defaults to `default`)') - subparsers = parser.add_subparsers(title='mode', dest='mode', help='(Must choose one)') - - # Scan mode - parser_scan = subparsers.add_parser('scan', help='Scan bucket permissions') - parser_scan.add_argument('--dangerous', action='store_true', - help='Include Write and WriteACP permissions checks (potentially destructive)') - parser_group = parser_scan.add_mutually_exclusive_group(required=True) - parser_group.add_argument('--buckets-file', '-f', dest='buckets_file', - help='Name of text file containing bucket names to check', metavar='file') - parser_group.add_argument('--bucket', '-b', dest='bucket', help='Name of bucket to check', metavar='bucket') - # TODO: Get help output to not repeat metavar names - i.e. 
`--bucket FILE, -f FILE` - # https://stackoverflow.com/a/9643162/2307994 - - # Dump mode - parser_dump = subparsers.add_parser('dump', help='Dump the contents of buckets') - parser_dump.add_argument('--dump-dir', '-d', required=True, dest='dump_dir', help='Directory to dump bucket into') - dump_parser_group = parser_dump.add_mutually_exclusive_group(required=True) - dump_parser_group.add_argument('--buckets-file', '-f', dest='dump_buckets_file', - help='Name of text file containing bucket names to check', metavar='file') - dump_parser_group.add_argument('--bucket', '-b', dest='dump_bucket', help='Name of bucket to check', metavar='bucket') - parser_dump.add_argument('--verbose', '-v', dest='dump_verbose', action='store_true', - help='Enable verbose output while dumping bucket(s)') - - # Parse the args - args = parser.parse_args() - - if 'http://' not in args.endpoint_url and 'https://' not in args.endpoint_url: - print("Error: endpoint_url must start with http:// or https:// scheme") - exit(1) - - s3service = None - anons3service = None - try: - s3service = S3Service(endpoint_url=args.endpoint_url, verify_ssl=args.verify_ssl, endpoint_address_style=args.endpoint_address_style,profile=args.aws_profile) - anons3service = S3Service(forceNoCreds=True, endpoint_url=args.endpoint_url, verify_ssl=args.verify_ssl, endpoint_address_style=args.endpoint_address_style) - except InvalidEndpointException as e: - print(f"Error: {e.message}") - exit(1) - - if s3service.aws_creds_configured is False: - print("Warning: AWS credentials not configured - functionality will be limited. Run:" - " `aws configure` to fix this.\n") - - bucketsIn = set() - - if args.mode == 'scan': - if args.buckets_file is not None: - bucketsIn = load_bucket_names_from_file(args.buckets_file) - elif args.bucket is not None: - bucketsIn.add(args.bucket) - - if args.dangerous: - print("INFO: Including dangerous checks. 
WARNING: This may change bucket ACL destructively") - - with ThreadPoolExecutor(max_workers=args.threads) as executor: - futures = { - executor.submit(scan_single_bucket, s3service, anons3service, args.dangerous, bucketName): bucketName for bucketName in bucketsIn - } - for future in as_completed(futures): - if future.exception(): - print(f"Bucket scan raised exception: {futures[future]} - {future.exception()}") - - elif args.mode == 'dump': - if args.dump_dir is None or not path.isdir(args.dump_dir): - print("Error: Given --dump-dir does not exist or is not a directory") - exit(1) - if args.dump_buckets_file is not None: - bucketsIn = load_bucket_names_from_file(args.dump_buckets_file) - elif args.dump_bucket is not None: - bucketsIn.add(args.dump_bucket) - - for bucketName in bucketsIn: - try: - b = S3Bucket(bucketName) - except ValueError as ve: - if str(ve) == "Invalid bucket name": - print(f"{bucketName} | bucket_name_invalid") - continue - else: - print(f"{bucketName} | {str(ve)}") - continue - - # Check if bucket exists first - s3service.check_bucket_exists(b) - - if b.exists == BucketExists.NO: - print(f"{b.name} | bucket_not_exist") - continue - - s3service.check_perm_read(b) - - if b.AuthUsersRead != Permission.ALLOWED: - anons3service.check_perm_read(b) - if b.AllUsersRead != Permission.ALLOWED: - print(f"{b.name} | Error: no read permissions") - else: - # Dump bucket without creds - print(f"{b.name} | Enumerating bucket objects...") - anons3service.enumerate_bucket_objects(b) - print(f"{b.name} | Total Objects: {str(len(b.objects))}, Total Size: {b.get_human_readable_size()}") - anons3service.dump_bucket_multithread(bucket=b, dest_directory=args.dump_dir, - verbose=args.dump_verbose, threads=args.threads) - else: - # Dump bucket with creds - print(f"{b.name} | Enumerating bucket objects...") - s3service.enumerate_bucket_objects(b) - print(f"{b.name} | Total Objects: {str(len(b.objects))}, Total Size: {b.get_human_readable_size()}") - 
s3service.dump_bucket_multithread(bucket=b, dest_directory=args.dump_dir, - verbose=args.dump_verbose, threads=args.threads) - else: - print("Invalid mode") - parser.print_help() - - -if __name__ == "__main__": - main() diff --git a/S3Scanner/exceptions.py b/S3Scanner/exceptions.py deleted file mode 100644 index feb7227..0000000 --- a/S3Scanner/exceptions.py +++ /dev/null @@ -1,18 +0,0 @@ -class AccessDeniedException(Exception): - def __init__(self, message): - pass - # Call the base class constructor - # super().__init__(message, None) - - # Now custom code - # self.errors = errors - - -class InvalidEndpointException(Exception): - def __init__(self, message): - self.message = message - - -class BucketMightNotExistException(Exception): - def __init__(self): - pass diff --git a/bucket/_test_/buckets.txt b/bucket/_test_/buckets.txt new file mode 100644 index 0000000..f28a970 --- /dev/null +++ b/bucket/_test_/buckets.txt @@ -0,0 +1,6 @@ +mybucket0 +mybucket1 +mybucket2 +mybucket3 +mybucket4 +_____ \ No newline at end of file diff --git a/bucket/bucket.go b/bucket/bucket.go new file mode 100644 index 0000000..fdfa264 --- /dev/null +++ b/bucket/bucket.go @@ -0,0 +1,265 @@ +package bucket + +import ( + "bufio" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + log "github.com/sirupsen/logrus" + "os" + "regexp" + "s3scanner/groups" + "strings" + "time" + "unicode" +) + +var BucketExists = uint8(1) +var BucketNotExist = uint8(0) +var BucketExistsUnknown = uint8(2) + +var PermissionAllowed = uint8(1) +var PermissionDenied = uint8(0) +var PermissionUnknown = uint8(2) + +// var bucketReIP = regexp.MustCompile(`^[0-9]{1-3}\.[0-9]{1-3}\.[0-9]{1-3}\.[0-9]{1-3}$`) +var bucketRe = regexp.MustCompile(`[^.\-a-z0-9]`) + +// Pattern from https://blogs.easydynamics.com/2016/10/24/aws-s3-bucket-name-validation-regex/ +// Missing: +// No xn-- prefix +// No -s3alias suffix +// 
https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html + +type Bucket struct { + //gorm.Model + ID uint `gorm:"primarykey" json:",omitempty"` + Name string `json:"name" gorm:"name;size:64;index"` + Region string `json:"region" gorm:"size:20"` + Exists uint8 `json:"exists"` + DateScanned time.Time `json:"date_scanned"` + Objects []BucketObject `json:"objects"` + ObjectsEnumerated bool `json:"objects_enumerated"` + Provider string `json:"provider"` + NumObjects int32 `json:"num_objects"` + + // Total size of all bucket objects in bytes + BucketSize uint64 `json:"bucket_size"` + OwnerId string `json:"owner_id"` + OwnerDisplayName string `json:"owner_display_name"` + + PermAuthUsersRead uint8 `json:"perm_auth_users_read"` + PermAuthUsersWrite uint8 `json:"perm_auth_users_write"` + PermAuthUsersReadACL uint8 `json:"perm_auth_users_read_acl"` + PermAuthUsersWriteACL uint8 `json:"perm_auth_users_write_acl"` + PermAuthUsersFullControl uint8 `json:"perm_auth_users_full_control"` + + PermAllUsersRead uint8 `json:"perm_all_users_read"` + PermAllUsersWrite uint8 `json:"perm_all_users_write"` + PermAllUsersReadACL uint8 `json:"perm_all_users_read_acl"` + PermAllUsersWriteACL uint8 `json:"perm_all_users_write_acl"` + PermAllUsersFullControl uint8 `json:"perm_all_users_full_control"` +} + +type BucketObject struct { + //gorm.Model + ID uint `gorm:"primarykey" json:",omitempty"` + Key string `json:"key" gorm:"type:string;size:1024"` // Keys can be up to 1,024 bytes long, UTF-8 encoded plus an additional byte just in case. 
https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html + Size uint64 `json:"size"` + BucketID uint `json:",omitempty"` +} + +func NewBucket(name string) Bucket { + return Bucket{ + Name: name, + Exists: BucketExistsUnknown, + ObjectsEnumerated: false, + PermAuthUsersRead: PermissionUnknown, + PermAuthUsersWrite: PermissionUnknown, + PermAuthUsersReadACL: PermissionUnknown, + PermAuthUsersWriteACL: PermissionUnknown, + PermAuthUsersFullControl: PermissionUnknown, + PermAllUsersRead: PermissionUnknown, + PermAllUsersWrite: PermissionUnknown, + PermAllUsersReadACL: PermissionUnknown, + PermAllUsersWriteACL: PermissionUnknown, + PermAllUsersFullControl: PermissionUnknown, + } +} + +func (bucket *Bucket) String() string { + if bucket.Exists == BucketNotExist { + return fmt.Sprintf("%v | bucket_not_exist", bucket.Name) + } + + var authUserPerms []string + if bucket.PermAuthUsersRead == PermissionAllowed { + authUserPerms = append(authUserPerms, "READ") + } + if bucket.PermAuthUsersWrite == PermissionAllowed { + authUserPerms = append(authUserPerms, "WRITE") + } + if bucket.PermAuthUsersReadACL == PermissionAllowed { + authUserPerms = append(authUserPerms, "READ_ACP") + } + if bucket.PermAuthUsersWriteACL == PermissionAllowed { + authUserPerms = append(authUserPerms, "WRITE_ACP") + } + if bucket.PermAuthUsersFullControl == PermissionAllowed { + authUserPerms = append(authUserPerms, "FULL_CONTROL") + } + + var allUsersPerms []string + if bucket.PermAllUsersRead == PermissionAllowed { + allUsersPerms = append(allUsersPerms, "READ") + } + if bucket.PermAllUsersWrite == PermissionAllowed { + allUsersPerms = append(allUsersPerms, "WRITE") + } + if bucket.PermAllUsersReadACL == PermissionAllowed { + allUsersPerms = append(allUsersPerms, "READ_ACP") + } + if bucket.PermAllUsersWriteACL == PermissionAllowed { + allUsersPerms = append(allUsersPerms, "WRITE_ACP") + } + if bucket.PermAllUsersFullControl == PermissionAllowed { + allUsersPerms = append(allUsersPerms, 
"FULL_CONTROL") + } + + return fmt.Sprintf("AuthUsers: [%v] | AllUsers: [%v]", strings.Join(authUserPerms, ", "), strings.Join(allUsersPerms, ", ")) +} + +func (bucket *Bucket) Permissions() map[*types.Grantee]map[string]uint8 { + return map[*types.Grantee]map[string]uint8{ + groups.AllUsersv2: { + "READ": bucket.PermAllUsersRead, + "WRITE": bucket.PermAllUsersWrite, + "READ_ACP": bucket.PermAllUsersReadACL, + "WRITE_ACP": bucket.PermAllUsersWriteACL, + "FULL_CONTROL": bucket.PermAllUsersFullControl, + }, + groups.AuthenticatedUsersv2: { + "READ": bucket.PermAuthUsersRead, + "WRITE": bucket.PermAuthUsersWrite, + "READ_ACP": bucket.PermAuthUsersReadACL, + "WRITE_ACP": bucket.PermAuthUsersWriteACL, + "FULL_CONTROL": bucket.PermAuthUsersFullControl, + }, + } +} + +func ReadFromFile(bucketFile string, bucketChan chan Bucket) error { + file, err := os.Open(bucketFile) + if err != nil { + return err + } + defer file.Close() + + fileScanner := bufio.NewScanner(file) + for fileScanner.Scan() { + bucketName := strings.TrimSpace(fileScanner.Text()) + if !IsValidS3BucketName(bucketName) { + log.Info(fmt.Sprintf("invalid | %s", bucketName)) + } else { + bucketChan <- NewBucket(strings.ToLower(bucketName)) + } + } + + if ferr := fileScanner.Err(); ferr != nil { + return ferr + } + + return err +} + +// ParseAclOutputv2 TODO: probably move this to providers.go +func (bucket *Bucket) ParseAclOutputv2(aclOutput *s3.GetBucketAclOutput) error { + bucket.OwnerId = *aclOutput.Owner.ID + if aclOutput.Owner.DisplayName != nil { + bucket.OwnerDisplayName = *aclOutput.Owner.DisplayName + } + + for _, b := range aclOutput.Grants { + if b.Grantee == groups.AllUsersv2 { + switch b.Permission { + case types.PermissionRead: + bucket.PermAllUsersRead = PermissionAllowed + case types.PermissionWrite: + bucket.PermAllUsersWrite = PermissionAllowed + case types.PermissionReadAcp: + bucket.PermAllUsersReadACL = PermissionAllowed + case types.PermissionWriteAcp: + bucket.PermAllUsersWriteACL = 
PermissionAllowed + case types.PermissionFullControl: + bucket.PermAllUsersFullControl = PermissionAllowed + default: + break + } + } + if b.Grantee == groups.AuthenticatedUsersv2 { + switch b.Permission { + case types.PermissionRead: + bucket.PermAuthUsersRead = PermissionAllowed + case types.PermissionWrite: + bucket.PermAuthUsersWrite = PermissionAllowed + case types.PermissionReadAcp: + bucket.PermAuthUsersReadACL = PermissionAllowed + case types.PermissionWriteAcp: + bucket.PermAuthUsersWriteACL = PermissionAllowed + case types.PermissionFullControl: + bucket.PermAuthUsersFullControl = PermissionAllowed + default: + break + } + } + } + return nil +} + +// Permission is a convenience method to convert a boolean into either a PermissionAllowed or PermissionDenied +func Permission(canDo bool) uint8 { + if canDo { + return PermissionAllowed + } else { + return PermissionDenied + } +} + +func IsValidS3BucketName(bucketName string) bool { + // TODO: Optimize the heck out of this + /* + Bucket names must not be formatted as an IP address (for example, 192.168.5.4). + */ + + // Bucket names can consist only of lowercase letters, numbers, dots (.), and hyphens (-). + if bucketRe.MatchString(bucketName) { + return false + } + + // Bucket names must be between 3 (min) and 63 (max) characters long. + if len(bucketName) < 3 || len(bucketName) > 63 { + return false + } + + // Bucket names must begin and end with a letter or number. 
+ firstChar := []rune(bucketName[0:1])[0] + lastChar := []rune(bucketName[len(bucketName)-1:])[0] + if !unicode.IsLetter(firstChar) && !unicode.IsNumber(firstChar) { + return false + } + if !unicode.IsLetter(lastChar) && !unicode.IsNumber(lastChar) { + return false + } + + // Bucket names must not start with the prefix 'xn--' + if strings.HasPrefix(bucketName, "xn--") { + return false + } + + // Bucket names must not end with the suffix "-s3alias" + if strings.HasSuffix(bucketName, "-s3alias") { + return false + } + + return true +} diff --git a/bucket/bucket_test.go b/bucket/bucket_test.go new file mode 100644 index 0000000..35d9363 --- /dev/null +++ b/bucket/bucket_test.go @@ -0,0 +1,352 @@ +package bucket + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" + "path/filepath" + "runtime" + "s3scanner/groups" + "testing" + "time" +) + +func TestIsValidS3BucketName_Good(t *testing.T) { + t.Parallel() + + goodNames := []string{"my-bucket", "asd", "b-2", "b.2-a", "100", "a.bc", "asdfdshfkhasdfkjhasdjkhfgakjhsdfghkjalksjhflkajshdflkjahsdlfkj"} + + for _, name := range goodNames { + assert.True(t, IsValidS3BucketName(name)) + } +} + +func TestIsValidS3BucketName_Bad(t *testing.T) { + t.Parallel() + + badNames := []string{"a", "aa", ".abc", "-abc", "mybucket-s3alias", "-s3alias", + "abc.", "abc-", "xn--abc", "-000-", "asdfdshfkhasdfkjhasdjkhfgakjhsdfghkjalksjhflkajshdflkjahsdlfkjab"} + + for _, name := range badNames { + assert.False(t, IsValidS3BucketName(name), name) + } +} + +func TestNewBucket(t *testing.T) { + t.Parallel() + + b := NewBucket("mybucket") + assert.Equal(t, PermissionUnknown, b.PermAuthUsersRead) + assert.Equal(t, PermissionUnknown, b.PermAuthUsersWrite) + assert.Equal(t, PermissionUnknown, b.PermAuthUsersReadACL) + assert.Equal(t, PermissionUnknown, b.PermAuthUsersWriteACL) + assert.Equal(t, PermissionUnknown, 
b.PermAuthUsersFullControl) + assert.Equal(t, PermissionUnknown, b.PermAllUsersRead) + assert.Equal(t, PermissionUnknown, b.PermAllUsersWrite) + assert.Equal(t, PermissionUnknown, b.PermAllUsersReadACL) + assert.Equal(t, PermissionUnknown, b.PermAllUsersWriteACL) + assert.Equal(t, PermissionUnknown, b.PermAllUsersFullControl) + assert.Equal(t, BucketExistsUnknown, b.Exists) + assert.False(t, b.ObjectsEnumerated) + assert.Equal(t, "mybucket", b.Name) +} + +type testOwner struct { + DisplayName string + ID string +} + +func TestBucket_ParseAclOutputv2(t *testing.T) { + t.Parallel() + + o := testOwner{ + DisplayName: "Test User", + ID: "1234", + } + + cannedACLPrivate := s3.GetBucketAclOutput{ + Grants: []types.Grant{}, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + cannedACLPublicRead := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AllUsersv2, + Permission: "READ", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + cannedACLPublicReadWrite := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AllUsersv2, + Permission: "READ", + }, + { + Grantee: groups.AllUsersv2, + Permission: "WRITE", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + publicReadACL := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AllUsersv2, + Permission: "READ_ACP", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + publicWriteACL := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AllUsersv2, + Permission: "WRITE_ACP", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + cannedACLPublicFullControl := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AllUsersv2, + Permission: "FULL_CONTROL", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + cannedACLAuthRead := 
s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AuthenticatedUsersv2, + Permission: "READ", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + cannedACLAuthReadWrite := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AuthenticatedUsersv2, + Permission: "READ", + }, + { + Grantee: groups.AuthenticatedUsersv2, + Permission: "WRITE", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + authReadACL := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AuthenticatedUsersv2, + Permission: "READ_ACP", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + authWriteACL := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AuthenticatedUsersv2, + Permission: "WRITE_ACP", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + cannedACLAuthFullControl := s3.GetBucketAclOutput{ + Grants: []types.Grant{ + { + Grantee: groups.AuthenticatedUsersv2, + Permission: "FULL_CONTROL", + }, + }, + Owner: &types.Owner{ + DisplayName: &o.DisplayName, + ID: &o.ID, + }, + } + + var tests = []struct { + name string + acl s3.GetBucketAclOutput + expectedAllowed map[*types.Grantee][]string + expectedDenied map[*types.Grantee][]string + }{ + {name: "private", acl: cannedACLPrivate}, + {name: "public read", acl: cannedACLPublicRead, expectedAllowed: map[*types.Grantee][]string{ + groups.AllUsersv2: {"READ"}, + }}, + {name: "public read-write", acl: cannedACLPublicReadWrite, expectedAllowed: map[*types.Grantee][]string{ + groups.AllUsersv2: {"READ", "WRITE"}, + }}, + {name: "public read acl", acl: publicReadACL, expectedAllowed: map[*types.Grantee][]string{ + groups.AllUsersv2: {"READ_ACP"}, + }}, + {name: "public write acl", acl: publicWriteACL, expectedAllowed: map[*types.Grantee][]string{ + groups.AllUsersv2: {"WRITE_ACP"}, + }}, + {name: "public full control", acl: 
cannedACLPublicFullControl, expectedAllowed: map[*types.Grantee][]string{ + groups.AllUsersv2: {"FULL_CONTROL"}, + }}, + {name: "auth read", acl: cannedACLAuthRead, expectedAllowed: map[*types.Grantee][]string{ + groups.AuthenticatedUsersv2: {"READ"}, + }}, + {name: "auth read-write", acl: cannedACLAuthReadWrite, expectedAllowed: map[*types.Grantee][]string{ + groups.AuthenticatedUsersv2: {"READ", "WRITE"}, + }}, + {name: "auth read acl", acl: authReadACL, expectedAllowed: map[*types.Grantee][]string{ + groups.AuthenticatedUsersv2: {"READ_ACP"}, + }}, + {name: "auth write acl", acl: authWriteACL, expectedAllowed: map[*types.Grantee][]string{ + groups.AuthenticatedUsersv2: {"WRITE_ACP"}, + }}, + {name: "auth full control", acl: cannedACLAuthFullControl, expectedAllowed: map[*types.Grantee][]string{ + groups.AuthenticatedUsersv2: {"FULL_CONTROL"}, + }}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + b := NewBucket("mytestbucket") + err := b.ParseAclOutputv2(&tt.acl) + assert.Nil(t2, err) + + for grantee, perms := range tt.expectedAllowed { + for _, perm := range perms { + assert.Equal(t2, PermissionAllowed, b.Permissions()[grantee][perm]) + } + } + for grantee, perms := range tt.expectedDenied { + for _, perm := range perms { + assert.Equal(t2, PermissionDenied, b.Permissions()[grantee][perm]) + } + } + }) + } +} + +func TestReadFromFile(t *testing.T) { + t.Parallel() + + _, filename, _, _ := runtime.Caller(0) + testFile := fmt.Sprintf("%s/_test_/buckets.txt", filepath.Dir(filename)) + + testChan := make(chan Bucket) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + g, _ := errgroup.WithContext(ctx) + defer cancel() + + g.Go(func() error { + err := ReadFromFile(testFile, testChan) + close(testChan) + return err + }) + + var i = 0 + for b := range testChan { + assert.Equal(t, fmt.Sprintf("mybucket%v", i), b.Name) + i++ + } + assert.Equal(t, 5, i) + + if err := g.Wait(); err != nil { + t.Error(err) + } +} + +func 
TestBucket_String(t *testing.T) { + t.Parallel() + + var tests = []struct { + name string + bucket Bucket + string string + }{ + {name: "public read", bucket: Bucket{ + Exists: BucketExists, + PermAllUsersRead: PermissionAllowed, + }, string: "AuthUsers: [] | AllUsers: [READ]"}, + {name: "public read-write", bucket: Bucket{ + Exists: BucketExists, + PermAllUsersRead: PermissionAllowed, + PermAllUsersWrite: PermissionAllowed, + }, string: "AuthUsers: [] | AllUsers: [READ, WRITE]"}, + {name: "public read acl", bucket: Bucket{ + Exists: BucketExists, + PermAllUsersReadACL: PermissionAllowed, + }, string: "AuthUsers: [] | AllUsers: [READ_ACP]"}, + {name: "public write acl", bucket: Bucket{ + Exists: BucketExists, + PermAllUsersWriteACL: PermissionAllowed, + }, string: "AuthUsers: [] | AllUsers: [WRITE_ACP]"}, + {name: "public full control", bucket: Bucket{ + Exists: BucketExists, + PermAllUsersFullControl: PermissionAllowed, + }, string: "AuthUsers: [] | AllUsers: [FULL_CONTROL]"}, + {name: "auth read", bucket: Bucket{ + Exists: BucketExists, + PermAuthUsersRead: PermissionAllowed, + }, string: "AuthUsers: [READ] | AllUsers: []"}, + {name: "auth read-write", bucket: Bucket{ + Exists: BucketExists, + PermAuthUsersRead: PermissionAllowed, + PermAuthUsersWrite: PermissionAllowed, + }, string: "AuthUsers: [READ, WRITE] | AllUsers: []"}, + {name: "auth read acl", bucket: Bucket{ + Exists: BucketExists, + PermAuthUsersReadACL: PermissionAllowed, + }, string: "AuthUsers: [READ_ACP] | AllUsers: []"}, + {name: "auth write acl", bucket: Bucket{ + Exists: BucketExists, + PermAuthUsersWriteACL: PermissionAllowed, + }, string: "AuthUsers: [WRITE_ACP] | AllUsers: []"}, + {name: "auth full control", bucket: Bucket{ + Exists: BucketExists, + PermAuthUsersFullControl: PermissionAllowed, + }, string: "AuthUsers: [FULL_CONTROL] | AllUsers: []"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + assert.Equal(t2, tt.string, tt.bucket.String()) + }) + } + +} diff 
--git a/cmd/mqingest/mqingest.go new file mode 100644 index 0000000..f2e76d0 --- /dev/null +++ b/cmd/mqingest/mqingest.go @@ -0,0 +1,123 @@ +package main + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "github.com/streadway/amqp" + "log" + "os" + "s3scanner/bucket" + "strings" +) + +func failOnError(err error, msg string) { + if err != nil { + log.Printf("%v - %v\n", msg, err) + os.Exit(1) + } +} + +func printUsage() { + fmt.Println("mqingest takes in a file of bucket names, one per line, and publishes them to a RabbitMQ queue") + flag.PrintDefaults() +} + +// Name of queue should indicate endpoint, no need to put endpoint in message + +type BucketMessage struct { + BucketName string `json:"bucket_name"` +} + +func main() { + var filename string + var url string + var queue_name string + + flag.StringVar(&filename, "file", "", "File name of buckets to send to MQ") + flag.StringVar(&url, "url", "amqp://guest:guest@localhost:5672/", "AMQP URI of RabbitMQ server") + flag.StringVar(&queue_name, "queue", "", "Name of message queue to publish buckets to") + + flag.Parse() + + if filename == "" || queue_name == "" { + fmt.Println("Flags 'file' and 'queue' are required") + printUsage() + os.Exit(1) + } + + conn, err := amqp.Dial(url) + failOnError(err, "Failed to connect to RabbitMQ") + defer conn.Close() + + ch, err := conn.Channel() + failOnError(err, "Failed to open a channel") + defer ch.Close() + + // Declare dead letter queue + dlq, dlErr := ch.QueueDeclare(queue_name+"_dead", true, false, false, + false, nil) + failOnError(dlErr, "Failed to declare dead letter queue") + + q, err := ch.QueueDeclare( + queue_name, // name + true, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + amqp.Table{ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": dlq.Name, + }, + ) + if err != nil { + failOnError(err, "Failed to declare a queue") + } + + err = ch.Qos( + 1, // prefetch count + 0, //
prefetch size + false, // global + ) + if err != nil { + failOnError(err, "Failed to set QoS on channel") + } + + file, err := os.Open(filename) + if err != nil { + failOnError(err, "Failed to open file") + } + defer file.Close() + + msgsPublished := 0 + + fileScanner := bufio.NewScanner(file) + for fileScanner.Scan() { + bucketName := strings.TrimSpace(fileScanner.Text()) + //bucketMsg := BucketMessage{BucketName: bucketName} + bucketMsg := bucket.Bucket{Name: bucketName} + bucketBytes, err := json.Marshal(bucketMsg) + if err != nil { + failOnError(err, "Failed to marshal bucket msg") + } + + err = ch.Publish( + "", + q.Name, + false, + false, + amqp.Publishing{Body: bucketBytes, DeliveryMode: amqp.Persistent}, + ) + if err != nil { + failOnError(err, "Failed to publish to channel") + } + msgsPublished += 1 + } + if err := fileScanner.Err(); err != nil { + failOnError(err, "fileScanner failed") + } + + log.Printf("%v bucket names published to queue %v\n", msgsPublished, queue_name) + +} diff --git a/config.yml b/config.yml new file mode 100644 index 0000000..7382bda --- /dev/null +++ b/config.yml @@ -0,0 +1,14 @@ +db: + uri: "postgresql://postgres:example@db_dev:5432/postgres" + +mq: + queue_name: "aws" + uri: "amqp://guest:guest@localhost:5672" + +providers: + custom: + insecure: false + endpoint_format: "https://$REGION.vultrobjects.com" + regions: + - "ewr1" + address_style: "path" \ No newline at end of file diff --git a/conftest.py b/conftest.py deleted file mode 100644 index 33b565c..0000000 --- a/conftest.py +++ /dev/null @@ -1,15 +0,0 @@ -#### -# Pytest Configuration -#### - - -def pytest_addoption(parser): - parser.addoption("--do-dangerous", action="store_true", - help="Run all tests, including ones where buckets are created.") - - -def pytest_generate_tests(metafunc): - if "do_dangerous_test" in metafunc.fixturenames: - do_dangerous_test = True if metafunc.config.getoption("do_dangerous") else False - print("do_dangerous_test: " + 
str(do_dangerous_test)) - metafunc.parametrize("do_dangerous_test", [do_dangerous_test]) \ No newline at end of file diff --git a/db/database.go b/db/database.go new file mode 100644 index 0000000..add3316 --- /dev/null +++ b/db/database.go @@ -0,0 +1,62 @@ +package db + +import ( + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + "log" + "os" + "s3scanner/bucket" + "time" +) + +var db *gorm.DB + +func Connect(dbConn string, migrate bool) error { + // Connect to the database and run migrations if needed + + // We've already connected + // TODO: Replace this with a sync.Once pattern + if db != nil { + return nil + } + + gormLogger := logger.New( + log.New(os.Stdout, "\r\n", log.LstdFlags), // io writer + logger.Config{ + SlowThreshold: time.Hour, // Slow SQL threshold + LogLevel: logger.Error, // Log level + IgnoreRecordNotFoundError: true, // Ignore ErrRecordNotFound error for logger + Colorful: true, // Enable color + }, + ) + + // https://github.com/go-gorm/postgres + database, err := gorm.Open(postgres.New(postgres.Config{ + DSN: dbConn, + PreferSimpleProtocol: true, // disables implicit prepared statement usage + }), &gorm.Config{ + Logger: gormLogger, + }) + + if err != nil { + return err + } + + if migrate { + err = database.AutoMigrate(&bucket.Bucket{}, &bucket.BucketObject{}) + if err != nil { + return err + } + } + + db = database + + return nil +} +func StoreBucket(b *bucket.Bucket) error { + if b.Exists == bucket.BucketNotExist { + return nil + } + return db.Session(&gorm.Session{CreateBatchSize: 1000, FullSaveAssociations: true}).Create(&b).Error +} diff --git a/db/database_test.go b/db/database_test.go new file mode 100644 index 0000000..e9ac25c --- /dev/null +++ b/db/database_test.go @@ -0,0 +1,85 @@ +package db + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "math/rand" + "os" + "s3scanner/bucket" + "testing" + "time" +) + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_/0123456789.") 
+ +func RandStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +func makeRandomBucket(numObjects int) bucket.Bucket { + c := bucket.Bucket{ + Name: fmt.Sprintf("s3scanner-test_%s", RandStringRunes(8)), + Region: "us-east-1", + Exists: 1, + DateScanned: time.Now(), + ObjectsEnumerated: true, + Provider: "aws", + BucketSize: 0, + PermAuthUsersRead: uint8(rand.Intn(2)), + PermAuthUsersWrite: uint8(rand.Intn(2)), + PermAuthUsersReadACL: uint8(rand.Intn(2)), + PermAuthUsersWriteACL: uint8(rand.Intn(2)), + PermAuthUsersFullControl: uint8(rand.Intn(2)), + PermAllUsersRead: uint8(rand.Intn(2)), + PermAllUsersWrite: uint8(rand.Intn(2)), + PermAllUsersReadACL: uint8(rand.Intn(2)), + PermAllUsersWriteACL: uint8(rand.Intn(2)), + PermAllUsersFullControl: uint8(rand.Intn(2)), + } + bucketObjects := make([]bucket.BucketObject, numObjects) + for j := 0; j < numObjects; j++ { + obj := bucket.BucketObject{ + Key: RandStringRunes(50), + Size: uint64(rand.Intn(250000000000)), // 25GB max + } + c.BucketSize += obj.Size + bucketObjects[j] = obj + } + c.Objects = bucketObjects + return c +} + +func BenchmarkStoreBucket(b *testing.B) { + rand.Seed(time.Now().UnixNano()) + + // Connect to database + err := Connect("host=localhost user=postgres password=example dbname=postgres port=5432 sslmode=disable TimeZone=America/Chicago", false) + if err != nil { + b.Error(err) + } + + for i := 0; i < b.N; i++ { + c := makeRandomBucket(50) + sErr := StoreBucket(&c) + if sErr != nil { + b.Error(sErr) + } + } +} + +func TestStoreBucket(t *testing.T) { + _, testDB := os.LookupEnv("TEST_DB") + if !testDB { + t.Skip("TEST_DB not enabled") + } + err := Connect("host=localhost user=postgres password=example dbname=postgres port=5432 sslmode=disable", true) + assert.Nil(t, err) + + b := makeRandomBucket(100) + sErr := StoreBucket(&b) + assert.Nil(t, sErr) +} diff --git a/go.mod b/go.mod new file mode 100644 
index 0000000..5966bea --- /dev/null +++ b/go.mod @@ -0,0 +1,60 @@ +module s3scanner + +go 1.18 + +require ( + github.com/aws/aws-sdk-go-v2 v1.18.1 + github.com/aws/aws-sdk-go-v2/config v1.18.27 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70 + github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0 + github.com/dustin/go-humanize v1.0.1 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/viper v1.16.0 + github.com/streadway/amqp v1.1.0 + github.com/stretchr/testify v1.8.3 + golang.org/x/sync v0.1.0 + gorm.io/driver/postgres v1.5.2 + gorm.io/gorm v1.25.1 +) + +require ( + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect + github.com/aws/smithy-go v1.13.5 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgx/v5 v5.4.1 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + 
github.com/jinzhu/now v1.1.5 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + golang.org/x/crypto v0.10.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..e783ee9 --- /dev/null +++ b/go.sum @@ -0,0 +1,553 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= 
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage 
v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo= +github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= +github.com/aws/aws-sdk-go-v2/config v1.18.27 h1:Az9uLwmssTE6OGTpsFqOnaGpLnKDqNYOJzWuC6UAYzA= +github.com/aws/aws-sdk-go-v2/config v1.18.27/go.mod h1:0My+YgmkGxeqjXZb5BYme5pc4drjTnM+x1GJ3zv42Nw= +github.com/aws/aws-sdk-go-v2/credentials v1.13.26 h1:qmU+yhKmOCyujmuPY7tf5MxR/RKyZrOPO3V4DobiTUk= +github.com/aws/aws-sdk-go-v2/credentials v1.13.26/go.mod h1:GoXt2YC8jHUBbA4jr+W3JiemnIbkXOfxSXcisUsZ3os= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 h1:LxK/bitrAr4lnh9LnIS6i7zWbCOdMsfzKFBI6LUCS0I= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4/go.mod h1:E1hLXN/BL2e6YizK1zFlYd8vsfi2GTjbjBazinMmeaM= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70 h1:4bh28MeeXoBFTjb0JjQ5sVatzlf5xA1DziV8mZed9v4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70/go.mod h1:9yI5NXzqy2yOiMytv6QLZHvlyHLwYxO9iIq+bZIbrFg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 h1:LWA+3kDM8ly001vJ1X1waCuLJdtTl48gwkPKWy9sosI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 h1:wscW+pnn3J1OYnanMnza5ZVYXLX4cKk5rAvUAl4Qu+c= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26/go.mod h1:MtYiox5gvyB+OyP0Mr0Sm/yzbEAIPL9eijj/ouHAPw0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 h1:zZSLP3v3riMOP14H7b4XP0uyfREDQOYv2cqIrvTXDNQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29/go.mod h1:z7EjRjVwZ6pWcWdI2H64dKttvzaP99jRIj5hphW0M5U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 h1:dBL3StFxHtpBzJJ/mNEsjXVgfO+7jR0dAIEwLqMapEA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3/go.mod h1:f1QyiAsvIv4B49DmCqrhlXqyaR+0IxMmyX+1P+AnzOM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0 h1:ya7fmrN2fE7s1P2gaPbNg5MTkERVWfsH8ToP1YC4Z9o= +github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 h1:nneMBM2p79PGWBQovYO/6Xnc2ryRMw3InnDJq1FHkSY= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.12/go.mod h1:HuCOxYsF21eKrerARYO6HapNeh9GBNq7fius2AcwodY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 
h1:2qTR7IFk7/0IN/adSFhYu9Xthr0zVFTgBrmPldILn80= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12/go.mod h1:E4VrHCPzmVB/KFXtqBGKb3c8zpbNBgKe3fisDNLAW5w= +github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 h1:XFJ2Z6sNUUcAz9poj+245DMkrHE4h2j5I9/xD50RHfE= +github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock 
v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.4.1 h1:oKfB/FhuVtit1bBM3zNRRsZ925ZkMN3HXL+LgLUM9lE= +github.com/jackc/pgx/v5 v5.4.1/go.mod h1:q6iHT8uDNXWiFNOlRqJzBTaSH3+2xCXkokxHZC5qWFY= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= 
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod 
h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM= +github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod 
h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0= +gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8= +gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64= +gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/groups/groups.go b/groups/groups.go new file mode 100644 index 0000000..efce294 --- /dev/null +++ b/groups/groups.go @@ -0,0 +1,17 @@ +package groups + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3/types" +) + +var AllUsersv2 = &types.Grantee{ + Type: types.TypeGroup, + URI: aws.String("http://acs.amazonaws.com/groups/global/AllUsers")} + +var AuthenticatedUsersv2 = &types.Grantee{ + Type: types.TypeGroup, + URI: aws.String("http://acs.amazonaws.com/groups/global/AuthenticatedUsers")} + +const ALL_USERS_URI = "uri=http://acs.amazonaws.com/groups/global/AllUsers" +const AUTH_USERS_URI = "uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers" diff --git a/log/formatter_hook.go b/log/formatter_hook.go new file mode 100644 index 0000000..3aa1778 --- /dev/null +++ b/log/formatter_hook.go @@ -0,0 +1,43 @@ +package log + +import ( + log "github.com/sirupsen/logrus" + "io" +) + +// Code from: https://github.com/sirupsen/logrus/issues/894#issuecomment-1284051207 + +// FormatterHook is a hook that writes logs of specified LogLevels with a formatter to specified Writer +type FormatterHook struct
{ + Writer io.Writer + LogLevels []log.Level + Formatter log.Formatter + DefaultFields log.Fields +} + +// Fire will be called when some logging function is called with current hook +// It will format log entry and write it to appropriate writer +func (hook *FormatterHook) Fire(entry *log.Entry) error { + + // Add default fields to any set by the user (entry.Data) + newFieldsMap := make(log.Fields, len(hook.DefaultFields)+len(entry.Data)) + for k, v := range entry.Data { + newFieldsMap[k] = v + } + for k, v := range hook.DefaultFields { + newFieldsMap[k] = v + } + entry.Data = newFieldsMap + + line, err := hook.Formatter.Format(entry) + if err != nil { + return err + } + _, err = hook.Writer.Write(line) + return err +} + +// Levels define on which log levels this hook would trigger +func (hook *FormatterHook) Levels() []log.Level { + return hook.LogLevels +} diff --git a/log/nested_json_formatter.go b/log/nested_json_formatter.go new file mode 100644 index 0000000..8201e6b --- /dev/null +++ b/log/nested_json_formatter.go @@ -0,0 +1,38 @@ +package log + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/sirupsen/logrus" +) + +type NestedJSONFormatter struct { + PrettyPrint bool + DisableHTMLEscape bool +} + +func (f *NestedJSONFormatter) Format(entry *logrus.Entry) ([]byte, error) { + data := make(logrus.Fields, len(entry.Data)+4) + data["level"] = entry.Level.String() + data["msg"] = entry.Message + data["time"] = entry.Time + + val, ok := entry.Data["bucket"] + if ok { + data["bucket"] = val + } + + b := &bytes.Buffer{} + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..47e47b5 --- /dev/null +++ b/main.go @@ -0,0 +1,473 @@ +package main + +import ( + "bytes" + 
"encoding/json" + "errors" + "flag" + "fmt" + "github.com/dustin/go-humanize" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + "github.com/streadway/amqp" + "os" + "reflect" + "s3scanner/bucket" + "s3scanner/db" + log2 "s3scanner/log" + "s3scanner/mq" + . "s3scanner/provider" + "strings" + "sync" + "text/tabwriter" +) + +func failOnError(err error, msg string) { + if err != nil { + log.Fatalf("%s: %s", msg, err) + } +} + +func printResult(b *bucket.Bucket) { + if args.json { + log.WithField("bucket", b).Info() + return + } + + if b.Exists == bucket.BucketNotExist { + log.Infof("not_exist | %s", b.Name) + return + } + + result := fmt.Sprintf("exists | %v | %v | %v", b.Name, b.Region, b.String()) + if b.ObjectsEnumerated { + result = fmt.Sprintf("%v | %v objects (%v)", result, len(b.Objects), humanize.Bytes(b.BucketSize)) + } + log.Info(result) +} + +func work(wg *sync.WaitGroup, buckets chan bucket.Bucket, provider StorageProvider, enumerate bool, writeToDB bool) { + defer wg.Done() + for b1 := range buckets { + b, existsErr := provider.BucketExists(&b1) + if existsErr != nil { + log.Errorf("error | %s | %s", b.Name, existsErr.Error()) + continue + } + + if b.Exists == bucket.BucketNotExist { + printResult(b) + continue + } + + // Scan permissions + scanErr := provider.Scan(b, false) + if scanErr != nil { + log.WithFields(log.Fields{"bucket": b}).Error(scanErr) + } + + if enumerate && b.PermAllUsersRead == bucket.PermissionAllowed { + log.WithFields(log.Fields{"method": "main.work()", + "bucket_name": b.Name, "region": b.Region}).Debugf("enumerating objects...") + enumErr := provider.Enumerate(b) + if enumErr != nil { + log.Errorf("Error enumerating bucket '%s': %v\nEnumerated objects: %v", b.Name, enumErr, len(b.Objects)) + continue + } + } + printResult(b) + + if writeToDB { + dbErr := db.StoreBucket(b) + if dbErr != nil { + log.Error(dbErr) + } + } + } +} + +func mqwork(threadId int, wg *sync.WaitGroup, conn *amqp.Connection, provider 
StorageProvider, queue string, threads int, + doEnumerate bool, writeToDB bool) { + _, once := os.LookupEnv("TEST_MQ") // If we're being tested, exit after one bucket is scanned + defer wg.Done() + + // Wrap the whole thing in a for (while) loop so if the mq server kills the channel, we start it up again + for { + ch, chErr := mq.Connect(conn, queue, threads, threadId) + if chErr != nil { + failOnError(chErr, "couldn't connect to message queue") + } + + msgs, consumeErr := ch.Consume(queue, fmt.Sprintf("%s_%v", queue, threadId), false, false, false, false, nil) + if consumeErr != nil { + log.Error(fmt.Errorf("failed to register a consumer: %w", consumeErr)) + return + } + + for j := range msgs { + bucketToScan := bucket.Bucket{} + + unmarshalErr := json.Unmarshal(j.Body, &bucketToScan) + if unmarshalErr != nil { + log.Error(unmarshalErr) + } + + if !bucket.IsValidS3BucketName(bucketToScan.Name) { + log.Info(fmt.Sprintf("invalid | %s", bucketToScan.Name)) + failOnError(j.Ack(false), "failed to ack") + continue + } + + b, existsErr := provider.BucketExists(&bucketToScan) + if existsErr != nil { + log.WithFields(log.Fields{"bucket": b.Name, "step": "checkExists"}).Error(existsErr) + failOnError(j.Reject(false), "failed to reject") + } + if b.Exists == bucket.BucketNotExist { + // ack the message and skip to the next + log.Infof("not_exist | %s", b.Name) + failOnError(j.Ack(false), "failed to ack") + continue + } + + scanErr := provider.Scan(b, false) + if scanErr != nil { + log.WithFields(log.Fields{"bucket": b}).Error(scanErr) + failOnError(j.Reject(false), "failed to reject") + continue + } + + if doEnumerate { + if b.PermAllUsersRead != bucket.PermissionAllowed { + printResult(&bucketToScan) + failOnError(j.Ack(false), "failed to ack") + if writeToDB { + dbErr := db.StoreBucket(&bucketToScan) + if dbErr != nil { + log.Error(dbErr) + } + } + continue + } + + log.WithFields(log.Fields{"method": "main.mqwork()", + "bucket_name": b.Name, "region": 
b.Region}).Debugf("enumerating objects...") + + enumErr := provider.Enumerate(b) + if enumErr != nil { + log.Errorf("Error enumerating bucket '%s': %v\nEnumerated objects: %v", b.Name, enumErr, len(b.Objects)) + failOnError(j.Reject(false), "failed to reject") + } + } + + printResult(&bucketToScan) + ackErr := j.Ack(false) + if ackErr != nil { + // Acknowledge mq message. May fail if we've taken too long and the server has closed the channel + // If it has, we break and start at the top of the outer for-loop again which re-establishes a new + // channel + log.WithFields(log.Fields{"bucket": b}).Error(ackErr) + break + } + + // Write to database + if writeToDB { + dbErr := db.StoreBucket(&bucketToScan) + if dbErr != nil { + log.Error(dbErr) + } + } + if once { + return + } + } + } +} + +type flagSetting struct { + indentLevel int + category int +} + +type argCollection struct { + bucketFile string + bucketName string + doEnumerate bool + json bool + providerFlag string + threads int + useMq bool + verbose bool + version bool + writeToDB bool +} + +func (args argCollection) Validate() error { + // Validate: only 1 input flag is provided + numInputFlags := 0 + if args.useMq { + numInputFlags += 1 + } + if args.bucketName != "" { + numInputFlags += 1 + } + if args.bucketFile != "" { + numInputFlags += 1 + } + if numInputFlags != 1 { + return errors.New("exactly one of: -bucket, -bucket-file, -mq required") + } + + return nil +} + +/* +validateConfig checks that the config file contains all necessary keys according to the args specified +*/ +func validateConfig(args argCollection) error { + expectedKeys := []string{} + configFileRequired := false + if args.providerFlag == "custom" { + configFileRequired = true + expectedKeys = append(expectedKeys, []string{"providers.custom.insecure", "providers.custom.endpoint_format", "providers.custom.regions", "providers.custom.address_style"}...) 
+ } + if args.writeToDB { + configFileRequired = true + expectedKeys = append(expectedKeys, []string{"db.uri"}...) + } + if args.useMq { + configFileRequired = true + expectedKeys = append(expectedKeys, []string{"mq.queue_name", "mq.uri"}...) + } + // User didn't give any arguments that require the config file + if !configFileRequired { + return nil + } + + // Try to find and read config file + if err := viper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + log.Error("config file not found") + os.Exit(1) + } else { + panic(fmt.Errorf("fatal error config file: %w", err)) + } + } + + // Verify all expected keys are in the config file + for _, k := range expectedKeys { + if !viper.IsSet(k) { + return fmt.Errorf("config file missing key: %s", k) + } + } + return nil +} + +const ( + CategoryInput int = 0 + CategoryOutput int = 1 + CategoryOptions int = 2 + CategoryDebug int = 3 +) + +var configPaths = []string{".", "/etc/s3scanner/", "$HOME/.s3scanner/"} + +var version = "dev" +var args = argCollection{} + +func main() { + // https://twin.sh/articles/39/go-concurrency-goroutines-worker-pools-and-throttling-made-simple + // https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#AnonymousCredentials + + viper.SetConfigName("config") // name of config file (without extension) + viper.SetConfigType("yml") // REQUIRED if the config file does not have the extension in the name + for _, p := range configPaths { + viper.AddConfigPath(p) + } + + flagSettings := make(map[string]flagSetting, 11) + flag.StringVar(&args.providerFlag, "provider", "aws", fmt.Sprintf( + "Object storage provider: %s - custom requires config file.", + strings.Join(AllProviders, ", "))) + flagSettings["provider"] = flagSetting{category: CategoryOptions} + flag.StringVar(&args.bucketName, "bucket", "", "Name of bucket to check.") + flagSettings["bucket"] = flagSetting{category: CategoryInput} + flag.StringVar(&args.bucketFile, "bucket-file", "", "File of bucket names to 
check.") + flagSettings["bucket-file"] = flagSetting{category: CategoryInput} + flag.BoolVar(&args.useMq, "mq", false, "Connect to RabbitMQ to get buckets. Requires config file key \"mq\".") + flagSettings["mq"] = flagSetting{category: CategoryInput} + + flag.BoolVar(&args.writeToDB, "db", false, "Save results to a Postgres database. Requires config file key \"db.uri\".") + flagSettings["db"] = flagSetting{category: CategoryOutput} + flag.BoolVar(&args.json, "json", false, "Print logs to stdout in JSON format instead of human-readable.") + flagSettings["json"] = flagSetting{category: CategoryOutput} + + flag.BoolVar(&args.doEnumerate, "enumerate", false, "Enumerate bucket objects (can be time-consuming).") + flagSettings["enumerate"] = flagSetting{category: CategoryOptions} + flag.IntVar(&args.threads, "threads", 4, "Number of threads to scan with.") + flagSettings["threads"] = flagSetting{category: CategoryOptions} + flag.BoolVar(&args.verbose, "verbose", false, "Enable verbose logging.") + flagSettings["verbose"] = flagSetting{category: CategoryDebug} + flag.BoolVar(&args.version, "version", false, "Print version") + flagSettings["version"] = flagSetting{category: CategoryDebug} + + flag.Usage = func() { + bufferCategoryInput := new(bytes.Buffer) + bufferCategoryOutput := new(bytes.Buffer) + bufferCategoryOptions := new(bytes.Buffer) + bufferCategoryDebug := new(bytes.Buffer) + categoriesWriters := map[int]*tabwriter.Writer{ + CategoryInput: tabwriter.NewWriter(bufferCategoryInput, 0, 0, 2, ' ', 0), + CategoryOutput: tabwriter.NewWriter(bufferCategoryOutput, 0, 0, 2, ' ', 0), + CategoryOptions: tabwriter.NewWriter(bufferCategoryOptions, 0, 0, 2, ' ', 0), + CategoryDebug: tabwriter.NewWriter(bufferCategoryDebug, 0, 0, 2, ' ', 0), + } + flag.VisitAll(func(f *flag.Flag) { + setting, ok := flagSettings[f.Name] + if !ok { + log.Errorf("flag is missing category: %s", f.Name) + os.Exit(1) + } + writer := categoriesWriters[setting.category] + + fmt.Fprintf(writer, "%s 
-%s\t", strings.Repeat(" ", setting.indentLevel), f.Name) // Two spaces before -; see next two comments. + name, usage := flag.UnquoteUsage(f) + fmt.Fprintf(writer, " %s\t", name) + fmt.Fprint(writer, usage) + if !reflect.ValueOf(f.DefValue).IsZero() { + fmt.Fprintf(writer, " Default: %q", f.DefValue) + } + fmt.Fprint(writer, "\n") + }) + + // Output all the categories + categoriesWriters[CategoryInput].Flush() + categoriesWriters[CategoryOutput].Flush() + categoriesWriters[CategoryOptions].Flush() + categoriesWriters[CategoryDebug].Flush() + fmt.Fprint(flag.CommandLine.Output(), "INPUT: (1 required)\n", bufferCategoryInput.String()) + fmt.Fprint(flag.CommandLine.Output(), "\nOUTPUT:\n", bufferCategoryOutput.String()) + fmt.Fprint(flag.CommandLine.Output(), "\nOPTIONS:\n", bufferCategoryOptions.String()) + fmt.Fprint(flag.CommandLine.Output(), "\nDEBUG:\n", bufferCategoryDebug.String()) + + // Add config file description + quotedPaths := "" + for i, b := range configPaths { + if i != 0 { + quotedPaths += " " + } + quotedPaths += fmt.Sprintf("\"%s\"", b) + } + + fmt.Fprintf(flag.CommandLine.Output(), "\nIf config file is required these locations will be searched for config.yml: %s\n", + quotedPaths) + } + flag.Parse() + + if args.version { + fmt.Println(version) + os.Exit(0) + } + + argsErr := args.Validate() + if argsErr != nil { + log.Error(argsErr) + os.Exit(1) + } + + // Configure logging + log.SetLevel(log.InfoLevel) + if args.verbose { + log.SetLevel(log.DebugLevel) + } + log.SetOutput(os.Stdout) + if args.json { + log.SetFormatter(&log2.NestedJSONFormatter{}) + } else { + log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) + } + + var provider StorageProvider + var err error + configErr := validateConfig(args) + if configErr != nil { + log.Error(configErr) + os.Exit(1) + } + if args.providerFlag == "custom" { + if viper.IsSet("providers.custom") { + log.Debug("found custom provider") + provider, err = NewCustomProvider( + 
viper.GetString("providers.custom.address_style"), + viper.GetBool("providers.custom.insecure"), + viper.GetStringSlice("providers.custom.regions"), + viper.GetString("providers.custom.endpoint_format")) + if err != nil { + log.Error(err) + os.Exit(1) + } + } + } else { + provider, err = NewProvider(args.providerFlag) + if err != nil { + log.Error(err) + os.Exit(1) + } + } + + // Setup database connection + if args.writeToDB { + dbConfig := viper.GetString("db.uri") + log.Debugf("using database URI from config: %s", dbConfig) + dbErr := db.Connect(dbConfig, true) + if dbErr != nil { + log.Error(dbErr) + os.Exit(1) + } + } + + var wg sync.WaitGroup + + if !args.useMq { + buckets := make(chan bucket.Bucket) + + for i := 0; i < args.threads; i++ { + wg.Add(1) + go work(&wg, buckets, provider, args.doEnumerate, args.writeToDB) + } + + if args.bucketFile != "" { + err := bucket.ReadFromFile(args.bucketFile, buckets) + close(buckets) + if err != nil { + log.Error(err) + os.Exit(1) + } + } else if args.bucketName != "" { + if !bucket.IsValidS3BucketName(args.bucketName) { + log.Info(fmt.Sprintf("invalid | %s", args.bucketName)) + os.Exit(0) + } + c := bucket.NewBucket(strings.ToLower(args.bucketName)) + buckets <- c + close(buckets) + } + + wg.Wait() + os.Exit(0) + } + + // Setup mq connection and spin off consumers + mqUri := viper.GetString("mq.uri") + mqName := viper.GetString("mq.queue_name") + conn, err := amqp.Dial(mqUri) + failOnError(err, fmt.Sprintf("failed to connect to AMQP URI '%s'", mqUri)) + defer conn.Close() + + for i := 0; i < args.threads; i++ { + wg.Add(1) + go mqwork(i, &wg, conn, provider, mqName, args.threads, args.doEnumerate, args.writeToDB) + } + log.Printf("Waiting for messages. 
To exit press CTRL+C") + wg.Wait() +} diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000..a33e2bc --- /dev/null +++ b/main_test.go @@ -0,0 +1,179 @@ +package main + +import ( + "bytes" + "encoding/json" + log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/writer" + "github.com/streadway/amqp" + "github.com/stretchr/testify/assert" + "os" + "s3scanner/bucket" + "s3scanner/mq" + "s3scanner/provider" + "sync" + "testing" +) + +func publishBucket(ch *amqp.Channel, b bucket.Bucket) { + bucketBytes, err := json.Marshal(b) + if err != nil { + failOnError(err, "Failed to marshal bucket msg") + } + + err = ch.Publish( + "", + "test", + false, + false, + amqp.Publishing{Body: bucketBytes, DeliveryMode: amqp.Transient}, + ) + if err != nil { + failOnError(err, "Failed to publish to channel") + } +} + +func TestArgCollection_Validate(t *testing.T) { + goodInputs := []argCollection{ + { + bucketName: "asdf", + bucketFile: "", + useMq: false, + }, + { + bucketName: "", + bucketFile: "buckets.txt", + useMq: false, + }, + { + bucketName: "", + bucketFile: "", + useMq: true, + }, + } + tooManyInputs := []argCollection{ + { + bucketName: "asdf", + bucketFile: "asdf", + useMq: false, + }, + { + bucketName: "adsf", + bucketFile: "", + useMq: true, + }, + { + bucketName: "", + bucketFile: "asdf.txt", + useMq: true, + }, + } + + for _, v := range goodInputs { + err := v.Validate() + if err != nil { + t.Errorf("%v: %e", v, err) + } + } + for _, v := range tooManyInputs { + err := v.Validate() + if err == nil { + t.Errorf("expected error but did not find one: %v", v) + } + } +} + +func TestWork(t *testing.T) { + b := bucket.NewBucket("s3scanner-bucketsize") + aws, err := provider.NewProviderAWS() + assert.Nil(t, err) + b2, exErr := aws.BucketExists(&b) + assert.Nil(t, exErr) + + wg := sync.WaitGroup{} + wg.Add(1) + c := make(chan bucket.Bucket, 1) + c <- *b2 + close(c) + work(&wg, c, aws, true, false) +} + +func TestMqWork(t *testing.T) { + _, 
testMQ := os.LookupEnv("TEST_MQ") + if !testMQ { + t.Skip("TEST_MQ not enabled") + } + + aws, err := provider.NewProviderAWS() + assert.Nil(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672") + assert.Nil(t, err) + + // Connect to queue and add a test bucket + ch, err := mq.Connect(conn, "test", 1, 0) + assert.Nil(t, err) + publishBucket(ch, bucket.Bucket{Name: "mqtest"}) + + mqwork(0, &wg, conn, aws, "test", 1, + false, false) +} + +func TestLogs(t *testing.T) { + var buf bytes.Buffer + log.AddHook(&writer.Hook{ // Send logs with level higher than warning to stderr + Writer: &buf, + LogLevels: []log.Level{ + log.PanicLevel, + log.FatalLevel, + log.ErrorLevel, + log.WarnLevel, + log.InfoLevel, + }, + }) + + tests := []struct { + name string + b bucket.Bucket + enum bool + expected string + }{ + {name: "enumerated, public-read, empty", b: bucket.Bucket{ + Name: "test-logging", + Exists: bucket.BucketExists, + ObjectsEnumerated: true, + NumObjects: 0, + BucketSize: 0, + PermAllUsersRead: bucket.PermissionAllowed, + }, enum: true, expected: "exists | test-logging | | AuthUsers: [] | AllUsers: [READ] | 0 objects (0 B)"}, + {name: "enumerated, closed", b: bucket.Bucket{ + Name: "enumerated-closed", + Exists: bucket.BucketExists, + ObjectsEnumerated: true, + NumObjects: 0, + BucketSize: 0, + PermAllUsersRead: bucket.PermissionDenied, + }, enum: true, expected: "exists | enumerated-closed | | AuthUsers: [] | AllUsers: [] | 0 objects (0 B)"}, + {name: "closed", b: bucket.Bucket{ + Name: "no-enumerate-closed", + Exists: bucket.BucketExists, + ObjectsEnumerated: false, + PermAllUsersRead: bucket.PermissionDenied, + }, enum: true, expected: "exists | no-enumerate-closed | | AuthUsers: [] | AllUsers: []"}, + {name: "no-enum-not-exist", b: bucket.Bucket{ + Name: "no-enum-not-exist", + Exists: bucket.BucketNotExist, + }, enum: false, expected: "not_exist | no-enum-not-exist"}, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t2 *testing.T) { + printResult(&tt.b) + assert.Contains(t2, buf.String(), tt.expected) + }) + } + +} diff --git a/mq/mq.go b/mq/mq.go new file mode 100644 index 0000000..62eac74 --- /dev/null +++ b/mq/mq.go @@ -0,0 +1,40 @@ +package mq + +import ( + "fmt" + log "github.com/sirupsen/logrus" + "github.com/streadway/amqp" +) + +func Connect(conn *amqp.Connection, queue string, threads int, threadId int) (*amqp.Channel, error) { + log.Debugf("{thread%v} Opening channel...", threadId) + ch, channelErr := conn.Channel() + if channelErr != nil { + return nil, fmt.Errorf("[Connect()] failed to open a channel: %w", channelErr) + } + //defer ch.Close() + + // Declare dead letter queue + dlq, dlErr := ch.QueueDeclare(queue+"_dead", true, false, false, false, nil) + if dlErr != nil { + return nil, fmt.Errorf("[Connect()] failed to declare dead letter queue: %w", dlErr) + } + + // Declare queue to consume messages from + _, queueErr := ch.QueueDeclare(queue, true, false, false, false, + amqp.Table{ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": dlq.Name, + }, + ) + if queueErr != nil { + return nil, fmt.Errorf("[Connect()] failed to declare a queue: %w", queueErr) + } + + qosErr := ch.Qos(threads, 0, false) + if qosErr != nil { + return nil, fmt.Errorf("[Connect()] failed to set QoS: %w", qosErr) + } + + return ch, nil +} diff --git a/packaging/.goreleaser.yaml b/packaging/.goreleaser.yaml new file mode 100644 index 0000000..7c2549e --- /dev/null +++ b/packaging/.goreleaser.yaml @@ -0,0 +1,47 @@ +before: + hooks: + # You may remove this if you don't use go modules. + - go mod tidy + # you may remove this if you don't need go generate +# - go generate ./... 
+builds: + - env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin +nfpms: + - id: s3scanner + maintainer: Dan Salmon + description: "Scan for open S3 buckets and dump the contents" + homepage: https://github.com/sa7mon/S3Scanner + license: MIT + formats: + - deb + - rpm + - apk +archives: + - format: tar.gz + # this name template makes the OS and Arch compatible with the results of uname. + name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + # use zip for windows archives + format_overrides: + - goos: windows + format: zip +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/packaging/docker/.dockerignore b/packaging/docker/.dockerignore new file mode 100644 index 0000000..2af8781 --- /dev/null +++ b/packaging/docker/.dockerignore @@ -0,0 +1,6 @@ +data/ +.git +bin/ +infra/ +packaging/dist/ +*.log \ No newline at end of file diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile new file mode 100644 index 0000000..51b9623 --- /dev/null +++ b/packaging/docker/Dockerfile @@ -0,0 +1,17 @@ +FROM golang:1.18-alpine AS builder + +ARG VERSION=dev + +WORKDIR /app +COPY .. /app + +RUN go build -ldflags="-X 'main.version=${VERSION}'" -o /s3scanner . 
+RUN go build -o /mqingest ./cmd/mqingest/ + +FROM alpine + +COPY --from=builder /s3scanner / +COPY --from=builder /mqingest / +ENV PATH="/" +ENTRYPOINT ["/s3scanner"] + diff --git a/permission/permission.go b/permission/permission.go new file mode 100644 index 0000000..804c6be --- /dev/null +++ b/permission/permission.go @@ -0,0 +1,153 @@ +package permission + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/service/s3" + log "github.com/sirupsen/logrus" + . "s3scanner/bucket" + . "s3scanner/groups" + "strings" + "time" +) + +func CheckPermReadACL(s3Client *s3.Client, bucket *Bucket) (bool, error) { + aclOutput, err := s3Client.GetBucketAcl(context.TODO(), + &s3.GetBucketAclInput{Bucket: &bucket.Name}) + if err != nil { + log.WithFields(log.Fields{ + "bucket_name": bucket.Name, + "method": "permission.CheckPermReadACL()", + }).Debugf("error getting ACL: %v", err) + var re *awshttp.ResponseError + if errors.As(err, &re) { + if re.HTTPStatusCode() == 403 { + //fmt.Println("Access Denied!") + return false, nil + } else { + return false, err + } + } + return false, err + } + err = bucket.ParseAclOutputv2(aclOutput) + if err != nil { + return false, err + } + return true, nil +} + +func CheckPermWriteAcl(svc *s3.Client, b *Bucket) (bool, error) { + // TODO: Ensure bucket exists + // TODO: Make sure this works with a bucket that allows PutACL. 400's returned always right now, is that because no creds? 
+ + grants := map[string][]string{} + if b.PermAuthUsersFullControl == PermissionAllowed { + grants["FULL_CONTROL"] = append(grants["FULL_CONTROL"], AUTH_USERS_URI) + } + if b.PermAuthUsersWriteACL == PermissionAllowed { + grants["WRITE_ACP"] = append(grants["WRITE_ACP"], AUTH_USERS_URI) + } + if b.PermAuthUsersWrite == PermissionAllowed { + grants["WRITE"] = append(grants["WRITE"], AUTH_USERS_URI) + } + if b.PermAuthUsersReadACL == PermissionAllowed { + grants["READ_ACP"] = append(grants["READ_ACP"], AUTH_USERS_URI) + } + if b.PermAuthUsersRead == PermissionAllowed { + grants["READ"] = append(grants["READ"], AUTH_USERS_URI) + } + + if b.PermAllUsersFullControl == PermissionAllowed { + grants["FULL_CONTROL"] = append(grants["FULL_CONTROL"], ALL_USERS_URI) + } + if b.PermAllUsersWriteACL == PermissionAllowed { + grants["WRITE_ACP"] = append(grants["WRITE_ACP"], ALL_USERS_URI) + } + if b.PermAllUsersWrite == PermissionAllowed { + grants["WRITE"] = append(grants["WRITE"], ALL_USERS_URI) + } + if b.PermAllUsersReadACL == PermissionAllowed { + grants["READ_ACP"] = append(grants["READ_ACP"], ALL_USERS_URI) + } + if b.PermAllUsersRead == PermissionAllowed { + grants["READ"] = append(grants["READ"], ALL_USERS_URI) + } + + _, err := svc.PutBucketAcl(context.TODO(), &s3.PutBucketAclInput{ + Bucket: &b.Name, + GrantFullControl: aws.String(strings.Join(grants["FULL_CONTROL"], ",")), + GrantWriteACP: aws.String(strings.Join(grants["WRITE_ACP"], ",")), + GrantWrite: aws.String(strings.Join(grants["WRITE"], ",")), + GrantReadACP: aws.String(strings.Join(grants["READ_ACP"], ",")), + GrantRead: aws.String(strings.Join(grants["READ"], ",")), + }) + if err != nil { + var re *awshttp.ResponseError + if errors.As(err, &re) { + if re.HTTPStatusCode() == 400 || re.HTTPStatusCode() == 403 { + //fmt.Println("Access Denied!") + return false, nil + } else { + return false, err + } + } + return false, err + } + + return true, nil +} + +func CheckPermWrite(svc *s3.Client, bucket *Bucket) 
(bool, error) { + // TODO: Ensure bucket exists + // TODO: What happens if we fail to clean up temp file + + // Try to put an object with a unique name and no body + timestampFile := fmt.Sprintf("%v_%v.txt", time.Now().Unix(), bucket.Name) + _, err := svc.PutObject(context.TODO(), &s3.PutObjectInput{ + Bucket: aws.String(bucket.Name), + Key: ×tampFile, + Body: nil, + }) + if err != nil { + var re *awshttp.ResponseError + if errors.As(err, &re) { + if re.HTTPStatusCode() == 403 { // No permission + return false, nil + } else { + return false, err + } + } + } + + // Clean up temporary file if it was successful + _, err = svc.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ + Bucket: aws.String(bucket.Name), + Key: ×tampFile, + }) + if err != nil { + return true, err + } + + return true, nil +} + +func CheckPermRead(svc *s3.Client, bucket *Bucket) (bool, error) { + _, err := svc.HeadBucket(context.TODO(), &s3.HeadBucketInput{Bucket: &bucket.Name}) + if err != nil { + log.Debugf("[%v][CheckPermRead] err: %v", bucket.Name, err) + var re *awshttp.ResponseError + if errors.As(err, &re) { + if re.HTTPStatusCode() == 403 { // No permission + return false, nil + } else { + return false, fmt.Errorf("[CheckPermRead] %s : %s : %w", bucket.Name, bucket.Region, err) + } + } + return false, fmt.Errorf("[CheckPermRead] %s : %s : %w", bucket.Name, bucket.Region, err) + } + return true, nil +} diff --git a/permission/permission_test.go b/permission/permission_test.go new file mode 100644 index 0000000..e73a8e1 --- /dev/null +++ b/permission/permission_test.go @@ -0,0 +1,159 @@ +package permission + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/stretchr/testify/assert" + "os" + "s3scanner/bucket" + "testing" +) + +var east2AnonClient *s3.Client +var east1AnonClient *s3.Client + +func failIfError(t *testing.T, err error) { + if err != nil { + t.Error(err) + } +} + +func 
TestMain(m *testing.M) { + cfg, err := config.LoadDefaultConfig( + context.TODO(), + config.WithRegion("us-east-1"), + config.WithCredentialsProvider(aws.AnonymousCredentials{}), + ) + if err != nil { + panic(err) + } + cfg.Credentials = nil + east1AnonClient = s3.NewFromConfig(cfg) + + east2Cfg, err := config.LoadDefaultConfig( + context.TODO(), + config.WithRegion("us-east-2"), + config.WithCredentialsProvider(aws.AnonymousCredentials{}), + ) + if err != nil { + panic(err) + } + east2Cfg.Credentials = nil + east2AnonClient = s3.NewFromConfig(east2Cfg) + + code := m.Run() + os.Exit(code) +} + +func TestCheckPermReadACL(t *testing.T) { + t.Parallel() + + // Bucket with READ_ACP allowed for AllUsers + permReadAllowed, err := CheckPermReadACL(east2AnonClient, &bucket.Bucket{ + Name: "s3scanner-all-readacp", + Region: "us-east-2", + }) + failIfError(t, err) + assert.True(t, permReadAllowed) + + // Bucket with READ_ACP allowed for AuthenticatedUsers + //permReadAllowed, err = CheckPermReadACL(east2CredClient, &bucket.Bucket{ + // Name: "s3scanner-auth-read-acl", + // Region: "us-east-2", + //}) + //failIfError(t, err) + //assert.True(t, permReadAllowed) + + // Bucket without READ_ACP allowed + permReadAllowed, err = CheckPermReadACL(east1AnonClient, &bucket.Bucket{ + Name: "s3scanner-private", + Region: "us-east-1", + }) + failIfError(t, err) + assert.False(t, permReadAllowed) + + // Bucket with READ_ACP allowed for AuthenticatedUsers, but we scan without creds + permReadAllowed, err = CheckPermReadACL(east2AnonClient, &bucket.Bucket{ + Name: "s3scanner-auth-read-acl", + Region: "us-east-2", + }) + failIfError(t, err) + assert.False(t, permReadAllowed) +} + +func TestCheckPermRead(t *testing.T) { + t.Parallel() + + // Bucket with READ permission + readAllowedBucket := bucket.Bucket{ + Name: "s3scanner-bucketsize", + Region: "us-east-1", + } + + // Assert we can read the bucket without creds + permReadAllowed, err := CheckPermRead(east1AnonClient, &readAllowedBucket) + 
failIfError(t, err) + assert.True(t, permReadAllowed) + + // Assert we can read the bucket with creds + //permReadAllowed, err = CheckPermRead(east1CredClient, &readAllowedBucket) + //failIfError(t, err) + //assert.True(t, permReadAllowed) + + // Bucket without READ permission + readNotAllowedBucket := bucket.Bucket{ + Name: "test", + Region: "us-east-2", + } + + // Assert we can't read the bucket without creds + permReadAllowed, err = CheckPermRead(east2AnonClient, &readNotAllowedBucket) + failIfError(t, err) + assert.False(t, permReadAllowed) + + // Assert we can't read the bucket even with creds + //permReadAllowed, err = CheckPermRead(east2CredClient, &readNotAllowedBucket) + //failIfError(t, err) + //assert.False(t, permReadAllowed) +} + +func TestCheckPermWrite(t *testing.T) { + t.Parallel() + + // Bucket with READ permission + readAllowedBucket := bucket.Bucket{ + Name: "s3scanner-bucketsize", + Region: "us-east-1", + } + + // Assert we can read the bucket without creds + permWrite, err := CheckPermWrite(east1AnonClient, &readAllowedBucket) + assert.Nil(t, err) + assert.False(t, permWrite) +} + +func TestCheckPermWriteACL(t *testing.T) { + t.Parallel() + + // Bucket with READ permission + readAllowedBucket := bucket.Bucket{ + Name: "s3scanner-bucketsize", + Region: "us-east-1", + PermAllUsersRead: bucket.PermissionAllowed, + PermAllUsersWrite: bucket.PermissionAllowed, + PermAllUsersReadACL: bucket.PermissionAllowed, + PermAllUsersFullControl: bucket.PermissionAllowed, + PermAuthUsersRead: bucket.PermissionAllowed, + PermAuthUsersReadACL: bucket.PermissionAllowed, + PermAuthUsersWrite: bucket.PermissionAllowed, + PermAuthUsersFullControl: bucket.PermissionAllowed, + } + + // Assert we can read the bucket without creds + permWrite, err := CheckPermWriteAcl(east1AnonClient, &readAllowedBucket) + assert.Nil(t, err) + assert.False(t, permWrite) + +} diff --git a/provider/aws.go b/provider/aws.go new file mode 100644 index 0000000..28c7f31 --- /dev/null +++ 
b/provider/aws.go @@ -0,0 +1,150 @@ +package provider + +import ( + "context" + "errors" + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + log "github.com/sirupsen/logrus" + "s3scanner/bucket" +) + +type providerAWS struct { + existsClient *s3.Client + clients map[string]*s3.Client +} + +func (a *providerAWS) BucketExists(b *bucket.Bucket) (*bucket.Bucket, error) { + b.Provider = a.Name() + if !bucket.IsValidS3BucketName(b.Name) { + return nil, errors.New("invalid bucket name") + } + region, err := manager.GetBucketRegion(context.TODO(), a.existsClient, b.Name) + if err == nil { + log.WithFields(log.Fields{"method": "aws.BucketExists()", + "bucket_name": b.Name, "region": region}).Debugf("no error - bucket exists") + b.Exists = bucket.BucketExists + b.Region = region + return b, nil + } + log.WithFields(log.Fields{"method": "aws.BucketExists()", + "bucket_name": b.Name, "region": region}).Debug(err) + + var bnf manager.BucketNotFound + var re2 *awshttp.ResponseError + if errors.As(err, &bnf) { + b.Exists = bucket.BucketNotExist + return b, nil + } else if errors.As(err, &re2) && re2.HTTPStatusCode() == 403 { + // AccessDenied implies the bucket exists + b.Exists = bucket.BucketExists + b.Region = region + return b, nil + } else { + // Error wasn't BucketNotFound or 403 + return b, err + } +} + +func (a *providerAWS) Scan(b *bucket.Bucket, doDestructiveChecks bool) error { + client, err := a.getRegionClient(b.Region) + if err != nil { + return err + } + + return checkPermissions(client, b, doDestructiveChecks) +} + +func (a *providerAWS) Enumerate(b *bucket.Bucket) error { + client, err := a.getRegionClient(b.Region) + if err != nil { + return err + } + + enumErr := enumerateListObjectsV2(client, b) + if enumErr != nil { + return enumErr + } + return nil +} + +func NewProviderAWS() 
(*providerAWS, error) { + pa := new(providerAWS) + client, err := pa.newAnonClientNoRegion() + if err != nil { + return nil, err + } + pa.existsClient = client + + // Seed the clients map with a common region + usEastClient, usErr := pa.newClient("us-east-1") + if usErr != nil { + return nil, usErr + } + pa.clients = map[string]*s3.Client{"us-east-1": usEastClient} + return pa, nil +} + +func (a *providerAWS) AddressStyle() int { + // AWS supports both styles + return VirtualHostStyle +} + +func (*providerAWS) Insecure() bool { + return false +} + +func (*providerAWS) Name() string { + return "aws" +} + +func (*providerAWS) newAnonClientNoRegion() (*s3.Client, error) { + cfg, err := config.LoadDefaultConfig( + context.TODO(), + config.WithDefaultRegion("us-west-2"), + config.WithCredentialsProvider(aws.AnonymousCredentials{}), + ) + if err != nil { + return nil, err + } + + cfg.Credentials = nil + s3ClientNoRegion := s3.NewFromConfig(cfg, func(o *s3.Options) { + o.UsePathStyle = false + }) + + return s3ClientNoRegion, nil +} + +func (a *providerAWS) newClient(region string) (*s3.Client, error) { + cfg, err := config.LoadDefaultConfig( + context.TODO(), + config.WithRegion(region), + config.WithCredentialsProvider(aws.AnonymousCredentials{})) + + if err != nil { + return nil, err + } + + cfg.Credentials = nil + return s3.NewFromConfig(cfg), nil +} + +// TODO: This method is copied from providerLinode +func (a *providerAWS) getRegionClient(region string) (*s3.Client, error) { + c, ok := a.clients[region] + if ok { + return c, nil + } + + // No client for this region yet - create one + c, err := a.newClient(region) + if err != nil { + return nil, err + } + a.clients[region] = c // TODO: Make sure this is thread-safe + return c, nil +} diff --git a/provider/aws_test.go b/provider/aws_test.go new file mode 100644 index 0000000..3baf969 --- /dev/null +++ b/provider/aws_test.go @@ -0,0 +1,139 @@ +package provider + +import ( + "github.com/stretchr/testify/assert" + 
"s3scanner/bucket" + "testing" +) + +type bucketPermissionTestCase struct { + b bucket.Bucket + ExpectedPermAuthUsersRead uint8 + ExpectedPermAuthUsersWrite uint8 + ExpectedPermAuthUsersReadACL uint8 + ExpectedPermAuthUsersWriteACL uint8 + ExpectedPermAuthUsersFullControl uint8 + ExpectedPermAllUsersRead uint8 + ExpectedPermAllUsersWrite uint8 + ExpectedPermAllUsersReadACL uint8 + ExpectedPermAllUsersWriteACL uint8 + ExpectedPermAllUsersFullControl uint8 +} + +// // Bucket exists and has READ_ACL open for AuthenticatedUsers +// authReadAclOpenBucket := bucket.NewBucket("s3scanner-auth-read-acl") +// err = ScanBucketPermissions(awsClientNoRegion, &authReadAclOpenBucket, false, awsEndpoint, false) +// failIfError(t, err) +// err = ScanBucketPermissions(awsClientNoRegion, &authReadAclOpenBucket, false, awsEndpoint, true) +// failIfError(t, err) +// assert.Equal(t, bucket.BucketExists, authReadAclOpenBucket.Exists) +// assert.Equal(t, bucket.PermissionDenied, authReadAclOpenBucket.PermAllUsersRead) +// assert.Equal(t, bucket.PermissionDenied, authReadAclOpenBucket.PermAllUsersReadACL) +// assert.Equal(t, bucket.PermissionDenied, authReadAclOpenBucket.PermAuthUsersRead) +// assert.Equal(t, bucket.PermissionAllowed, authReadAclOpenBucket.PermAuthUsersReadACL) +// +// // Bucket exists and has READ open for AuthenticatedUsers +// authReadOpenBucket := bucket.NewBucket("s3scanner-auth") +// err = ScanBucketPermissions(awsClientNoRegion, &authReadOpenBucket, false, awsEndpoint, false) +// failIfError(t, err) +// err = ScanBucketPermissions(awsClientNoRegion, &authReadOpenBucket, false, awsEndpoint, true) +// failIfError(t, err) +// assert.Equal(t, bucket.BucketExists, authReadOpenBucket.Exists) +// assert.Equal(t, bucket.PermissionDenied, authReadOpenBucket.PermAllUsersRead) +// assert.Equal(t, bucket.PermissionDenied, authReadOpenBucket.PermAllUsersReadACL) +// assert.Equal(t, bucket.PermissionAllowed, authReadOpenBucket.PermAuthUsersRead) +// assert.Equal(t, 
bucket.PermissionDenied, authReadOpenBucket.PermAuthUsersReadACL) + +func TestProviderAWS_BucketExists(t *testing.T) { + t.Parallel() + + testCases := []struct { + b bucket.Bucket + shouldExist uint8 + }{ + {bucket.NewBucket("s3scanner-private"), bucket.BucketExists}, // Bucket that exists + {bucket.NewBucket("asdfasdfdoesnotexist"), bucket.BucketNotExist}, // Bucket that doesn't exist + {bucket.NewBucket("flaws.cloud"), bucket.BucketExists}, // Bucket with dot that exists + {bucket.NewBucket("asdfasdf.danthesalmon.com"), bucket.BucketNotExist}, // Bucket with dot that doesn't exist + } + + p, perr := NewProviderAWS() + if perr != nil { + t.Error(perr) + } + + for _, testCase := range testCases { + e, err := p.BucketExists(&testCase.b) + if err != nil { + t.Error(err) + } + assert.Equal(t, testCase.shouldExist, e.Exists, testCase.b.Name) + } + + // Bucket with invalid name + b := bucket.NewBucket("asdf@test.com") + _, err := p.BucketExists(&b) + if err == nil { + t.Error("expected error but didn't find one") + } + assert.Equal(t, "invalid bucket name", err.Error()) +} + +func TestProviderAWS_Scan(t *testing.T) { + t.Parallel() + var testCases []bucketPermissionTestCase + testCases = append(testCases, bucketPermissionTestCase{ // Bucket exists but isn't open + b: bucket.NewBucket("test"), + ExpectedPermAllUsersRead: bucket.PermissionDenied, + ExpectedPermAllUsersReadACL: bucket.PermissionDenied, + }) + testCases = append(testCases, bucketPermissionTestCase{ // Bucket exists and has READ open for auth and all + b: bucket.NewBucket("s3scanner-bucketsize"), + ExpectedPermAllUsersRead: bucket.PermissionAllowed, + ExpectedPermAllUsersReadACL: bucket.PermissionDenied, + }) + testCases = append(testCases, bucketPermissionTestCase{ // Bucket exists and has READ and READ_ACL open for auth and all + b: bucket.NewBucket("s3scanner-all-read-readacl"), + ExpectedPermAllUsersRead: bucket.PermissionAllowed, + ExpectedPermAllUsersReadACL: bucket.PermissionAllowed, + 
ExpectedPermAuthUsersRead: bucket.PermissionAllowed, + ExpectedPermAuthUsersReadACL: bucket.PermissionAllowed, + }) + + p, perr := NewProviderAWS() + if perr != nil { + t.Error(perr) + } + + for _, testCase := range testCases { + b, err := p.BucketExists(&testCase.b) + if err != nil { + t.Error(err) + } + scanErr := p.Scan(b, true) + if scanErr != nil { + t.Error(scanErr) + } + assert.Equal(t, testCase.ExpectedPermAllUsersRead, b.PermAllUsersRead) + assert.Equal(t, testCase.ExpectedPermAllUsersReadACL, b.PermAllUsersReadACL) + } +} + +func TestProviderAWS_Enumerate(t *testing.T) { + t.Parallel() + + p, perr := NewProviderAWS() + if perr != nil { + t.Error(perr) + } + + b := bucket.NewBucket("s3scanner-bucketsize") + b2, err := p.BucketExists(&b) + assert.Nil(t, err) + assert.Equal(t, bucket.BucketExists, b2.Exists) + + err = p.Enumerate(b2) + assert.Nil(t, err) + assert.EqualValues(t, 1, b2.NumObjects) + assert.EqualValues(t, 1, len(b2.Objects)) +} diff --git a/provider/custom.go b/provider/custom.go new file mode 100644 index 0000000..2ada1fb --- /dev/null +++ b/provider/custom.go @@ -0,0 +1,113 @@ +package provider + +import ( + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3" + "s3scanner/bucket" + "strings" +) + +type CustomProvider struct { + regions []string + clients map[string]*s3.Client + insecure bool + addressStyle int + endpointFormat string +} + +func (cp CustomProvider) Insecure() bool { + return cp.insecure +} + +func (cp CustomProvider) AddressStyle() int { + return cp.addressStyle +} + +func (CustomProvider) Name() string { + return "custom" +} + +func (cp CustomProvider) BucketExists(b *bucket.Bucket) (*bucket.Bucket, error) { + b.Provider = cp.Name() + exists, region, err := bucketExists(cp.clients, b) + if err != nil { + return b, err + } + if exists { + b.Exists = bucket.BucketExists + b.Region = region + } else { + b.Exists = bucket.BucketNotExist + } + + return b, nil +} + +func (cp CustomProvider) Scan(b *bucket.Bucket, 
doDestructiveChecks bool) error { + client := cp.getRegionClient(b.Region) + return checkPermissions(client, b, doDestructiveChecks) +} + +func (cp CustomProvider) Enumerate(b *bucket.Bucket) error { + if b.Exists != bucket.BucketExists { + return errors.New("bucket might not exist") + } + if b.PermAllUsersRead != bucket.PermissionAllowed { + return nil + } + + client := cp.getRegionClient(b.Region) + enumErr := enumerateListObjectsV2(client, b) + if enumErr != nil { + return enumErr + } + return nil +} + +func (cp *CustomProvider) getRegionClient(region string) *s3.Client { + c, ok := cp.clients[region] + if ok { + return c + } + return nil +} + +/* +NewCustomProvider is a constructor which makes a new custom provider with the given options. +addressStyle should either be "path" or "vhost" +*/ +func NewCustomProvider(addressStyle string, insecure bool, regions []string, endpointFormat string) (*CustomProvider, error) { + cp := new(CustomProvider) + cp.regions = regions + cp.insecure = insecure + cp.endpointFormat = endpointFormat + if addressStyle == "path" { + cp.addressStyle = PathStyle + } else if addressStyle == "vhost" { + cp.addressStyle = VirtualHostStyle + } else { + return cp, fmt.Errorf("unknown custom provider address style: %s. 
Expected 'path' or 'vhost'", addressStyle) + } + + clients, err := cp.newClients() + if err != nil { + return nil, err + } + cp.clients = clients + return cp, nil +} + +func (cp *CustomProvider) newClients() (map[string]*s3.Client, error) { + clients := make(map[string]*s3.Client, len(cp.regions)) + for _, r := range cp.regions { + regionUrl := strings.Replace(cp.endpointFormat, "$REGION", r, -1) + client, err := newNonAWSClient(cp, regionUrl) + if err != nil { + return nil, err + } + clients[r] = client + } + + return clients, nil +} diff --git a/provider/custom_test.go b/provider/custom_test.go new file mode 100644 index 0000000..72aeb4e --- /dev/null +++ b/provider/custom_test.go @@ -0,0 +1,30 @@ +package provider + +import ( + "github.com/stretchr/testify/assert" + "s3scanner/bucket" + "testing" +) + +func TestCustomProvider_BucketExists(t *testing.T) { + t.Parallel() + + p := providers["custom"] + var tests = []struct { + name string + b bucket.Bucket + exists uint8 + }{ + {name: "exists, access denied", b: bucket.NewBucket("assets"), exists: bucket.BucketExists}, + {name: "exists, open", b: bucket.NewBucket("nurse-virtual-assistants"), exists: bucket.BucketExists}, + {name: "no such bucket", b: bucket.NewBucket("s3scanner-no-exist"), exists: bucket.BucketNotExist}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + gb, err := p.BucketExists(&tt.b) + assert.Nil(t2, err) + assert.Equal(t2, tt.exists, gb.Exists) + }) + } +} diff --git a/provider/digitalocean.go b/provider/digitalocean.go new file mode 100644 index 0000000..b4ddff1 --- /dev/null +++ b/provider/digitalocean.go @@ -0,0 +1,100 @@ +package provider + +import ( + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3" + "s3scanner/bucket" +) + +type providerDO struct { + regions []string + clients map[string]*s3.Client +} + +func (pdo providerDO) Insecure() bool { + return false +} + +func (pdo providerDO) Name() string { + return "digitalocean" +} + +func (pdo 
providerDO) AddressStyle() int { + return PathStyle +} + +func (pdo providerDO) BucketExists(b *bucket.Bucket) (*bucket.Bucket, error) { + b.Provider = pdo.Name() + exists, region, err := bucketExists(pdo.clients, b) + if err != nil { + return b, err + } + if exists { + b.Exists = bucket.BucketExists + b.Region = region + } else { + b.Exists = bucket.BucketNotExist + } + + return b, nil +} + +func (pdo providerDO) Scan(bucket *bucket.Bucket, doDestructiveChecks bool) error { + client := pdo.getRegionClient(bucket.Region) + return checkPermissions(client, bucket, doDestructiveChecks) +} + +func (pdo providerDO) Enumerate(b *bucket.Bucket) error { + if b.Exists != bucket.BucketExists { + return errors.New("bucket might not exist") + } + + client := pdo.getRegionClient(b.Region) + enumErr := enumerateListObjectsV2(client, b) + if enumErr != nil { + return enumErr + } + return nil +} + +func (pdo *providerDO) Regions() []string { + urls := make([]string, len(pdo.regions)) + for i, r := range pdo.regions { + urls[i] = fmt.Sprintf("https://%s.digitaloceanspaces.com", r) + } + return urls +} + +func (pdo *providerDO) newClients() (map[string]*s3.Client, error) { + clients := make(map[string]*s3.Client, len(pdo.regions)) + for _, r := range pdo.Regions() { + client, err := newNonAWSClient(pdo, r) + if err != nil { + return nil, err + } + clients[r] = client + } + + return clients, nil +} + +func (pdo *providerDO) getRegionClient(region string) *s3.Client { + c, ok := pdo.clients[region] + if ok { + return c + } + return nil +} + +func NewProviderDO() (*providerDO, error) { + pdo := new(providerDO) + pdo.regions = []string{"nyc3", "sfo2", "sfo3", "ams3", "sgp1", "fra1", "syd1"} + + clients, err := pdo.newClients() + if err != nil { + return pdo, err + } + pdo.clients = clients + return pdo, nil +} diff --git a/provider/digitalocean_test.go b/provider/digitalocean_test.go new file mode 100644 index 0000000..ec00dc3 --- /dev/null +++ b/provider/digitalocean_test.go @@ -0,0 
+1,60 @@ +package provider + +import ( + "github.com/stretchr/testify/assert" + "s3scanner/bucket" + "testing" +) + +func TestScanBucketPermissions_DO(t *testing.T) { + t.Parallel() + + do, doErr := NewProviderDO() + assert.Nil(t, doErr) + + // Bucket exists but isn't open + c := bucket.NewBucket("admin") + c2, cErr := do.BucketExists(&c) + assert.Nil(t, cErr) + cScanErr := do.Scan(c2, true) + assert.Nil(t, cScanErr) + assert.Equal(t, bucket.BucketExists, c2.Exists) + assert.Equal(t, bucket.PermissionDenied, c2.PermAllUsersRead) + + // Bucket exists and has READ open + o := bucket.NewBucket("stats") + o2, oErr := do.BucketExists(&o) + assert.Nil(t, oErr) + oScanErr := do.Scan(o2, true) + assert.Nil(t, oScanErr) + assert.Equal(t, bucket.BucketExists, o2.Exists) + assert.Equal(t, bucket.PermissionAllowed, o2.PermAllUsersRead) + + // Bucket with a dot that does not exist + //dotNoBucket := bucket.NewBucket("s3.s3scanner.com") + + // Bucket with an invalid name (contains @ sign) + //emailBucket := bucket.NewBucket("admin@example.com") + + // Bucket exists and has READ and READ_ACL open + // TODO: Find a bucket for here + //readAclOpenBucket := bucket.NewBucket("s3scanner-all-read-readacl") + //err = ScanBucketPermissions(doClient, &readAclOpenBucket, false, doEndpoint) + //if err != nil { + // t.Error(err) + //} + //assert.Equal(t, bucket.BucketExists, readAclOpenBucket.Exists) + //assert.Equal(t, bucket.PermissionAllowed, readAclOpenBucket.PermAllUsersRead) + //assert.Equal(t, bucket.PermissionAllowed, readAclOpenBucket.PermAllUsersReadACL) + + // Open bucket with a dot that exists + // TODO: Find a bucket for here + //dotBucket := bucket.NewBucket("flaws.cloud") + //err = ScanBucketPermissions(doClient, &dotBucket, false, doEndpoint) + //if err != nil { + // t.Error(err) + //} + //assert.Equal(t, bucket.BucketExists, dotBucket.Exists) + //assert.Equal(t, bucket.PermissionAllowed, dotBucket.PermAllUsersRead) + +} diff --git a/provider/dreamhost.go 
b/provider/dreamhost.go new file mode 100644 index 0000000..49d56df --- /dev/null +++ b/provider/dreamhost.go @@ -0,0 +1,100 @@ +package provider + +import ( + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3" + "s3scanner/bucket" +) + +type ProviderDreamhost struct { + regions []string + clients map[string]*s3.Client +} + +func (p ProviderDreamhost) Insecure() bool { + return false +} + +func (ProviderDreamhost) Name() string { + return "dreamhost" +} + +func (p ProviderDreamhost) AddressStyle() int { + return PathStyle +} + +func (p ProviderDreamhost) BucketExists(b *bucket.Bucket) (*bucket.Bucket, error) { + b.Provider = p.Name() + exists, region, err := bucketExists(p.clients, b) + if err != nil { + return b, err + } + if exists { + b.Exists = bucket.BucketExists + b.Region = region + } else { + b.Exists = bucket.BucketNotExist + } + + return b, nil +} + +func (p ProviderDreamhost) Scan(bucket *bucket.Bucket, doDestructiveChecks bool) error { + client := p.getRegionClient(bucket.Region) + return checkPermissions(client, bucket, doDestructiveChecks) +} + +func (p ProviderDreamhost) getRegionClient(region string) *s3.Client { + c, ok := p.clients[region] + if ok { + return c + } + return nil +} + +func (p ProviderDreamhost) Enumerate(b *bucket.Bucket) error { + if b.Exists != bucket.BucketExists { + return errors.New("bucket might not exist") + } + + client := p.getRegionClient(b.Region) + enumErr := enumerateListObjectsV2(client, b) + if enumErr != nil { + return enumErr + } + return nil +} + +func (p ProviderDreamhost) Regions() []string { + urls := make([]string, len(p.regions)) + for i, r := range p.regions { + urls[i] = fmt.Sprintf("https://objects-%s.dream.io", r) + } + return urls +} + +func (p *ProviderDreamhost) newClients() (map[string]*s3.Client, error) { + clients := make(map[string]*s3.Client, len(p.regions)) + for _, r := range p.Regions() { + client, err := newNonAWSClient(p, r) + if err != nil { + return nil, err + } + clients[r] = 
client + } + + return clients, nil +} + +func NewProviderDreamhost() (*ProviderDreamhost, error) { + pd := new(ProviderDreamhost) + pd.regions = []string{"us-east-1"} + + clients, err := pd.newClients() + if err != nil { + return pd, err + } + pd.clients = clients + return pd, nil +} diff --git a/provider/gcp.go b/provider/gcp.go new file mode 100644 index 0000000..e8d9c95 --- /dev/null +++ b/provider/gcp.go @@ -0,0 +1,65 @@ +package provider + +import ( + "errors" + "github.com/aws/aws-sdk-go-v2/service/s3" + "s3scanner/bucket" +) + +// GCP like AWS, has a "universal" endpoint, but unlike AWS GCP does not require you to follow a redirect to the +// "proper" region. We can simply use storage.googleapis.com as the endpoint for all requests. +type GCP struct { + client *s3.Client +} + +func (g GCP) Insecure() bool { + return false +} + +func (GCP) Name() string { + return "gcp" +} + +// AddressStyle will return PathStyle, but GCP also supports VirtualHostStyle +func (g GCP) AddressStyle() int { + return PathStyle +} + +func (g GCP) BucketExists(b *bucket.Bucket) (*bucket.Bucket, error) { + b.Provider = g.Name() + if !bucket.IsValidS3BucketName(b.Name) { + return nil, errors.New("invalid bucket name") + } + exists, region, err := bucketExists(map[string]*s3.Client{"default": g.client}, b) + if err != nil { + return b, err + } + + b.Region = region + if exists { + b.Exists = bucket.BucketExists + } else { + b.Exists = bucket.BucketNotExist + } + + return b, nil +} + +func (g GCP) Scan(bucket *bucket.Bucket, doDestructiveChecks bool) error { + return checkPermissions(g.client, bucket, doDestructiveChecks) +} + +func (g GCP) Enumerate(bucket *bucket.Bucket) error { + return enumerateListObjectsV2(g.client, bucket) +} + +func NewProviderGCP() (*GCP, error) { + pg := new(GCP) + c, err := newNonAWSClient(pg, "https://storage.googleapis.com") + if err != nil { + return pg, err + } + pg.client = c + + return pg, nil +} diff --git a/provider/linode.go b/provider/linode.go new 
file mode 100644 index 0000000..fb4dcbf --- /dev/null +++ b/provider/linode.go @@ -0,0 +1,100 @@ +package provider + +import ( + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3" + "s3scanner/bucket" +) + +type providerLinode struct { + regions []string + clients map[string]*s3.Client +} + +func NewProviderLinode() (*providerLinode, error) { + pl := new(providerLinode) + pl.regions = []string{"us-east-1", "us-southeast-1", "eu-central-1", "ap-south-1"} + + clients, err := pl.newClients() + if err != nil { + return pl, err + } + pl.clients = clients + return pl, nil +} + +func (pl *providerLinode) getRegionClient(region string) *s3.Client { + c, ok := pl.clients[region] + if ok { + return c + } + return nil +} + +func (pl *providerLinode) BucketExists(b *bucket.Bucket) (*bucket.Bucket, error) { + b.Provider = pl.Name() + exists, region, err := bucketExists(pl.clients, b) + if err != nil { + return b, err + } + if exists { + b.Exists = bucket.BucketExists + b.Region = region + } else { + b.Exists = bucket.BucketNotExist + } + + return b, nil +} + +func (pl *providerLinode) Enumerate(b *bucket.Bucket) error { + if b.Exists != bucket.BucketExists { + return errors.New("bucket might not exist") + } + + client := pl.getRegionClient(b.Region) + enumErr := enumerateListObjectsV2(client, b) + if enumErr != nil { + return enumErr + } + return nil +} + +func (pl *providerLinode) newClients() (map[string]*s3.Client, error) { + clients := make(map[string]*s3.Client, len(pl.regions)) + for _, r := range pl.Regions() { + client, err := newNonAWSClient(pl, r) + if err != nil { + return nil, err + } + clients[r] = client + } + + return clients, nil +} + +func (pl *providerLinode) Scan(b *bucket.Bucket, doDestructiveChecks bool) error { + client := pl.getRegionClient(b.Region) + return checkPermissions(client, b, doDestructiveChecks) +} + +func (*providerLinode) Insecure() bool { + return false +} + +func (*providerLinode) Name() string { + return "linode" +} + +func (pl 
*providerLinode) Regions() []string { + urls := make([]string, len(pl.regions)) + for i, r := range pl.regions { + urls[i] = fmt.Sprintf("https://%s.linodeobjects.com", r) + } + return urls +} + +func (*providerLinode) AddressStyle() int { + return VirtualHostStyle +} diff --git a/provider/providers.go b/provider/providers.go new file mode 100644 index 0000000..48aaa1e --- /dev/null +++ b/provider/providers.go @@ -0,0 +1,242 @@ +package provider + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + log "github.com/sirupsen/logrus" + "net/http" + "s3scanner/bucket" + "s3scanner/permission" + "time" +) + +const ( + PathStyle = 0 + VirtualHostStyle = 1 +) + +type StorageProvider interface { + Insecure() bool + AddressStyle() int + BucketExists(*bucket.Bucket) (*bucket.Bucket, error) + Scan(*bucket.Bucket, bool) error + Enumerate(*bucket.Bucket) error + Name() string +} + +type bucketCheckResult struct { + region string + exists bool +} + +var AllProviders = []string{ + "aws", "custom", "digitalocean", "dreamhost", "gcp", "linode", +} + +func NewProvider(name string) (StorageProvider, error) { + var ( + provider StorageProvider + err error + ) + switch name { + case "aws": + provider, err = NewProviderAWS() + case "digitalocean": + provider, err = NewProviderDO() + case "dreamhost": + provider, err = NewProviderDreamhost() + case "gcp": + provider, err = NewProviderGCP() + case "linode": + provider, err = NewProviderLinode() + default: + err = fmt.Errorf("unknown provider: %s", name) + } + return provider, err +} + +func newNonAWSClient(sp StorageProvider, regionURL string) (*s3.Client, error) { + cfg, err := config.LoadDefaultConfig( + context.TODO(), + 
config.WithEndpointResolverWithOptions( + aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{ + URL: regionURL, + }, nil + })), + config.WithCredentialsProvider(aws.AnonymousCredentials{}), + ) + if err != nil { + return nil, err + } + + if sp.Insecure() { + cfg.HTTPClient = &http.Client{Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }} + } + + addrStyleOption := func(o *s3.Options) { o.UsePathStyle = false } + if sp.AddressStyle() == PathStyle { + addrStyleOption = func(o *s3.Options) { o.UsePathStyle = true } + } + + cfg.Credentials = nil // TODO: Remove and test + return s3.NewFromConfig(cfg, addrStyleOption), nil +} + +/* +enumerateListObjectsV2 will enumerate all objects stored in b using the ListObjectsV2 API endpoint. The endpoint will +be called until the IsTruncated field is false +*/ +func enumerateListObjectsV2(client *s3.Client, b *bucket.Bucket) error { + var continuationToken *string + continuationToken = nil + page := 0 + + for { + log.WithFields(log.Fields{ + "bucket_name": b.Name, + "method": "providers.enumerateListObjectsV2()", + }).Debugf("requesting objects page %d", page) + output, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ + Bucket: &b.Name, + ContinuationToken: continuationToken, + EncodingType: types.EncodingTypeUrl, + }, + ) + if err != nil { + return err + } + + for _, obj := range output.Contents { + b.Objects = append(b.Objects, bucket.BucketObject{Key: *obj.Key, Size: uint64(obj.Size)}) + b.BucketSize += uint64(obj.Size) + } + + if !output.IsTruncated { + b.ObjectsEnumerated = true + break + } + continuationToken = output.NextContinuationToken + page += 1 + if page >= 5000 { // TODO: Should this limit be lowered? + return errors.New("more than 5000 pages of objects found. 
Skipping for now") + } + } + b.NumObjects = int32(len(b.Objects)) + return nil +} + +func checkPermissions(client *s3.Client, b *bucket.Bucket, doDestructiveChecks bool) error { + /* + // 1. Check if b exists + // 2. Check for READ_ACP + // 3. If FullControl is allowed for either AllUsers or AuthorizedUsers, skip the remainder of those tests + // 4. Check for READ + // 5. If doing destructive checks: + // 5a. Check for Write + // 5b. Check for WriteACP + */ + + b.DateScanned = time.Now() + + // Check for READ_ACP permission + canReadACL, err := permission.CheckPermReadACL(client, b) + if err != nil { + return fmt.Errorf("error occurred while checking for ReadACL: %v", err.Error()) + } + b.PermAllUsersReadACL = bucket.Permission(canReadACL) + // TODO: Can we skip the rest of the checks if READ_ACP is allowed? + + // We can skip the rest of the checks if FullControl is allowed + if b.PermAuthUsersFullControl == bucket.PermissionAllowed { + return nil + } + + // Check for READ permission + canRead, err := permission.CheckPermRead(client, b) + if err != nil { + return fmt.Errorf("%v | error occurred while checking for READ: %v", b.Name, err.Error()) + } + b.PermAllUsersRead = bucket.Permission(canRead) + + if doDestructiveChecks { + // Check for WRITE permission + permWrite, writeErr := permission.CheckPermWrite(client, b) + if writeErr != nil { + return fmt.Errorf("%v | error occurred while checking for WRITE: %v", b.Name, writeErr.Error()) + } + b.PermAllUsersWrite = bucket.Permission(permWrite) + + // Check for WRITE_ACP permission + permWriteAcl, writeAclErr := permission.CheckPermWriteAcl(client, b) + if writeAclErr != nil { + return fmt.Errorf("error occurred while checking for WriteACL: %v", writeAclErr.Error()) + } + b.PermAllUsersWriteACL = bucket.Permission(permWriteAcl) + } + return nil +} + +func bucketExists(clients map[string]*s3.Client, b *bucket.Bucket) (bool, string, error) { + // TODO: Should this return a client or a region name?
If region name, we'll need GetClient(region) + // TODO: Add region priority - order in which to check. maps are not ordered + results := make(chan bucketCheckResult, len(clients)) + e := make(chan error, 1) + + for region, client := range clients { + go func(bucketName string, client *s3.Client, region string) { + logFields := log.Fields{ + "bucket_name": b.Name, + "region": region, + "method": "providers.bucketExists()", + } + _, regionErr := manager.GetBucketRegion(context.TODO(), client, bucketName) + if regionErr == nil { + log.WithFields(logFields).Debugf("no error - bucket exists") + results <- bucketCheckResult{region: region, exists: true} + return + } + + var bnf manager.BucketNotFound + var re2 *awshttp.ResponseError + if errors.As(regionErr, &bnf) { + log.WithFields(logFields).Debugf("BucketNotFound") + results <- bucketCheckResult{region: region, exists: false} + } else if errors.As(regionErr, &re2) && re2.HTTPStatusCode() == 403 { + log.WithFields(logFields).Debugf("AccessDenied") + results <- bucketCheckResult{region: region, exists: true} + } else { + // If regionErr is a ResponseError, only return the unwrapped error i.e. 
"Method Not Allowed" + // Otherwise, return the whole error + err := regionErr + if errors.As(regionErr, &re2) { + err = re2.Unwrap() + } + log.WithFields(logFields).Debug(fmt.Errorf("unhandled error: %w", regionErr)) + e <- err + } + }(b.Name, client, region) + } + + for range clients { + select { + case err := <-e: + return false, "", err + case res := <-results: + if res.exists { + return true, res.region, nil + } + } + } + return false, "", nil +} diff --git a/provider/providers_test.go b/provider/providers_test.go new file mode 100644 index 0000000..09ed374 --- /dev/null +++ b/provider/providers_test.go @@ -0,0 +1,217 @@ +package provider + +import ( + "github.com/stretchr/testify/assert" + "os" + "s3scanner/bucket" + "testing" +) + +var providers = map[string]StorageProvider{} + +func TestMain(m *testing.M) { + var provider StorageProvider + + provider, err := NewProviderAWS() + if err != nil { + panic(err) + } + providers["aws"] = provider + + provider, err = NewCustomProvider( + "path", + false, + []string{"ewr1", "ams1"}, + "https://$REGION.vultrobjects.com") + if err != nil { + panic(err) + } + providers["custom"] = provider + + provider, err = NewProviderDO() + if err != nil { + panic(err) + } + providers["digitalocean"] = provider + + provider, err = NewProviderDreamhost() + if err != nil { + panic(err) + } + providers["dreamhost"] = provider + + provider, err = NewProviderGCP() + if err != nil { + panic(err) + } + providers["gcp"] = provider + + provider, err = NewProviderLinode() + if err != nil { + panic(err) + } + providers["linode"] = provider + + code := m.Run() + os.Exit(code) +} + +func failIfError(t *testing.T, err error) { + if err != nil { + t.Error(err) + } +} +func TestProvider_EnumerateListObjectsV2_short(t *testing.T) { + t.Parallel() + p, pErr := NewProviderAWS() + failIfError(t, pErr) + c, cErr := p.newClient("us-east-1") + failIfError(t, cErr) + + // Bucket with "page" of objects (<1k keys) + b := bucket.Bucket{Name: 
"s3scanner-bucketsize", + Exists: bucket.BucketExists, Region: "us-east-1", + PermAllUsersRead: bucket.PermissionAllowed} + enumErr := enumerateListObjectsV2(c, &b) + if enumErr != nil { + t.Errorf("error enumerating s3scanner-bucketsize: %e", enumErr) + } + assert.True(t, b.ObjectsEnumerated) + assert.Equal(t, 1, len(b.Objects)) + assert.Equal(t, uint64(43), b.BucketSize) +} + +func Test_EnumerateListObjectsV2_long(t *testing.T) { + t.Parallel() + p, pErr := NewProviderAWS() + failIfError(t, pErr) + c, cErr := p.newClient("us-east-1") + failIfError(t, cErr) + + // Bucket with more than 1k objects + b2 := bucket.Bucket{Name: "s3scanner-long", Exists: bucket.BucketExists, + Region: "us-east-1", PermAllUsersRead: bucket.PermissionAllowed} + b2Err := enumerateListObjectsV2(c, &b2) + if b2Err != nil { + t.Errorf("error enumerating s3scanner-long: %e", b2Err) + } + assert.True(t, b2.ObjectsEnumerated) + assert.Equal(t, 3501, len(b2.Objects)) + assert.Equal(t, uint64(4000), b2.BucketSize) +} + +func Test_StorageProvider_Statics(t *testing.T) { + t.Parallel() + + var tests = []struct { + name string + provider StorageProvider + insecure bool + addressStyle int + }{ + {name: "AWS", provider: providers["aws"], insecure: false, addressStyle: VirtualHostStyle}, + {name: "DO", provider: providers["digitalocean"], insecure: false, addressStyle: PathStyle}, + {name: "Dreamhost", provider: providers["dreamhost"], insecure: false, addressStyle: PathStyle}, + {name: "GCP", provider: providers["gcp"], insecure: false, addressStyle: PathStyle}, + {name: "Linode", provider: providers["linode"], insecure: false, addressStyle: VirtualHostStyle}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + assert.Equal(t2, tt.insecure, tt.provider.Insecure()) + assert.Equal(t2, tt.addressStyle, tt.provider.AddressStyle()) + }) + } +} + +func Test_StorageProvider_BucketExists(t *testing.T) { + t.Parallel() + + var tests = []struct { + name string + provider StorageProvider 
+ goodBucket bucket.Bucket + badBucket bucket.Bucket + }{ + {name: "AWS", provider: providers["aws"], goodBucket: bucket.NewBucket("s3scanner-empty"), badBucket: bucket.NewBucket("s3scanner-no-exist")}, + {name: "DO", provider: providers["digitalocean"], goodBucket: bucket.NewBucket("logo"), badBucket: bucket.NewBucket("s3scanner-no-exist")}, + {name: "Dreamhost", provider: providers["dreamhost"], goodBucket: bucket.NewBucket("assets"), badBucket: bucket.NewBucket("s3scanner-no-exist")}, + {name: "GCP", provider: providers["gcp"], goodBucket: bucket.NewBucket("books"), badBucket: bucket.NewBucket("s3scanner-no-exist")}, + {name: "Linode", provider: providers["linode"], goodBucket: bucket.NewBucket("vantage"), badBucket: bucket.NewBucket("s3scanner-no-exist")}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + gb, err := tt.provider.BucketExists(&tt.goodBucket) + assert.Nil(t2, err) + assert.Equal(t2, bucket.BucketExists, gb.Exists) + + bb, err := tt.provider.BucketExists(&tt.badBucket) + assert.Nil(t2, err) + assert.Equal(t2, bucket.BucketNotExist, bb.Exists) + + }) + } +} + +func Test_StorageProvider_Enum(t *testing.T) { + t.Parallel() + + var tests = []struct { + name string + provider StorageProvider + goodBucket bucket.Bucket + numObjects int + }{ + {name: "AWS", provider: providers["aws"], goodBucket: bucket.NewBucket("s3scanner-empty"), numObjects: 0}, + {name: "Custom public-read", provider: providers["custom"], goodBucket: bucket.NewBucket("alicante"), numObjects: 209}, + {name: "Custom no public-read", provider: providers["custom"], goodBucket: bucket.NewBucket("assets"), numObjects: 0}, + {name: "DO", provider: providers["digitalocean"], goodBucket: bucket.NewBucket("action"), numObjects: 2}, + {name: "Dreamhost", provider: providers["dreamhost"], goodBucket: bucket.NewBucket("bitrix24"), numObjects: 6}, + {name: "GCP", provider: providers["gcp"], goodBucket: bucket.NewBucket("assets"), numObjects: 3}, + {name: "Linode", 
provider: providers["linode"], goodBucket: bucket.NewBucket("vantage"), numObjects: 45}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + gb, err := tt.provider.BucketExists(&tt.goodBucket) + assert.Nil(t2, err) + err = tt.provider.Scan(&tt.goodBucket, false) + assert.Nil(t2, err) + scanErr := tt.provider.Enumerate(gb) + assert.Nil(t2, scanErr) + assert.Equal(t2, bucket.BucketExists, gb.Exists) + assert.Equal(t2, int32(tt.numObjects), gb.NumObjects) + }) + } +} + +func Test_StorageProvider_Scan(t *testing.T) { + t.Parallel() + + var tests = []struct { + name string + provider StorageProvider + bucket bucket.Bucket + permissions string + }{ + {name: "AWS", provider: providers["aws"], bucket: bucket.NewBucket("s3scanner-empty"), permissions: "AuthUsers: [] | AllUsers: [READ]"}, + {name: "Custom public-read-write", provider: providers["custom"], bucket: bucket.NewBucket("nurse-virtual-assistants"), permissions: "AuthUsers: [] | AllUsers: [READ, WRITE]"}, + {name: "Custom no public-read", provider: providers["custom"], bucket: bucket.NewBucket("assets"), permissions: "AuthUsers: [] | AllUsers: []"}, + {name: "DO", provider: providers["digitalocean"], bucket: bucket.NewBucket("logo"), permissions: "AuthUsers: [] | AllUsers: [READ]"}, + {name: "Dreamhost", provider: providers["dreamhost"], bucket: bucket.NewBucket("bitrix24"), permissions: "AuthUsers: [] | AllUsers: [READ]"}, + {name: "GCP", provider: providers["gcp"], bucket: bucket.NewBucket("hatrioua"), permissions: "AuthUsers: [] | AllUsers: []"}, + {name: "Linode", provider: providers["linode"], bucket: bucket.NewBucket("vantage"), permissions: "AuthUsers: [] | AllUsers: [READ]"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t2 *testing.T) { + gb, err := tt.provider.BucketExists(&tt.bucket) + scanErr := tt.provider.Scan(gb, true) + assert.Nil(t2, err) + assert.Nil(t2, scanErr) + assert.Equal(t2, bucket.BucketExists, gb.Exists) + assert.Equal(t2, tt.bucket.String(), 
tt.permissions) + }) + } +} diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index f8d8975..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,6 +0,0 @@ -[build-system] -requires = [ - "setuptools", - "wheel" -] -build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 1e1f9b5..0000000 --- a/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -boto3>=1.20 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 3e0c64f..0000000 --- a/setup.cfg +++ /dev/null @@ -1,33 +0,0 @@ -[metadata] -name = S3Scanner -version = 2.0.2 -author = Dan Salmon -author_email = dan@salmon.cat -description = Scan for open S3 buckets and dump the contents -long_description = file: README.md -long_description_content_type = text/markdown -url = https://github.com/sa7mon/S3Scanner -project_urls = - Bug Tracker = https://github.com/sa7mon/S3Scanner -classifiers = - Programming Language :: Python :: 3.6 - Programming Language :: Python :: 3.7 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - Topic :: Security - License :: OSI Approved :: MIT License - Operating System :: OS Independent - -[options] -packages = S3Scanner -install_requires = - boto3>=1.20 -python_requires = >=3.6 - -[options.entry_points] -console_scripts = - s3scanner = S3Scanner.__main__:main - -[tool:pytest] -python_files=test_*.py -filterwarnings = ignore::pytest.PytestCollectionWarning diff --git a/tests/TestUtils.py b/tests/TestUtils.py deleted file mode 100644 index efa541a..0000000 --- a/tests/TestUtils.py +++ /dev/null @@ -1,49 +0,0 @@ -import random -import string -import boto3 - - -class TestBucketService: - def __init__(self): - self.session = boto3.Session(profile_name='privileged') - self.s3_client = self.session.client('s3') - - @staticmethod - def generate_random_bucket_name(length=40): - candidates = string.ascii_lowercase + string.digits - return 's3scanner-' + 
''.join(random.choice(candidates) for i in range(length)) - - def delete_bucket(self, bucket_name): - self.s3_client.delete_bucket(Bucket=bucket_name) - - def create_bucket(self, danger_bucket): - bucket_name = self.generate_random_bucket_name() - - # For type descriptions, refer to: https://github.com/sa7mon/S3Scanner/wiki/Test-Buckets - if danger_bucket == 1: - self.s3_client.create_bucket(Bucket=bucket_name, - GrantWrite='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers') - self.s3_client.put_bucket_acl(Bucket=bucket_name, - GrantWrite='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers', - GrantWriteACP='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers') - elif danger_bucket == 2: - self.s3_client.create_bucket(Bucket=bucket_name, - GrantWrite='uri=http://acs.amazonaws.com/groups/global/AllUsers', - GrantWriteACP='uri=http://acs.amazonaws.com/groups/global/AllUsers') - elif danger_bucket == 3: - self.s3_client.create_bucket(Bucket=bucket_name, - GrantRead='uri=http://acs.amazonaws.com/groups/global/AllUsers', - GrantWrite='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers', - GrantWriteACP='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers') - elif danger_bucket == 4: - self.s3_client.create_bucket(Bucket=bucket_name, - GrantWrite='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers,' - 'uri=http://acs.amazonaws.com/groups/global/AllUsers') - elif danger_bucket == 5: - self.s3_client.create_bucket(Bucket=bucket_name, - GrantWriteACP='uri=http://acs.amazonaws.com/groups/global/AuthenticatedUsers,' - 'uri=http://acs.amazonaws.com/groups/global/AllUsers') - else: - raise Exception("Unknown danger bucket type") - - return bucket_name diff --git a/tests/test_bucket.py b/tests/test_bucket.py deleted file mode 100644 index c975f92..0000000 --- a/tests/test_bucket.py +++ /dev/null @@ -1,48 +0,0 @@ -from S3Scanner.S3Bucket import S3Bucket, S3BucketObject, Permission - -""" -Tests for S3Bucket class 
go here -""" - - -def test_invalid_bucket_name(): - try: - S3Bucket(name="asdf,;0()") - except ValueError as ve: - if str(ve) != "Invalid bucket name": - raise ve - - -def test_s3_bucket_object(): - o1 = S3BucketObject(key='index.html', size=8096, last_modified='2018-03-02T08:10:25.000Z') - o2 = S3BucketObject(key='home.html', size=2, last_modified='2018-03-02T08:10:25.000Z') - - assert o1 != o2 - assert o2 < o1 # test __lt__ method which compares keys - assert str(o1) == "Key: index.html, Size: 8096, LastModified: 2018-03-02T08:10:25.000Z" - assert o1.get_human_readable_size() == "7.9KB" - - -def test_check_bucket_name(): - S3Bucket(name="asdfasdf.s3.amazonaws.com") - S3Bucket(name="asdf:us-west-1") - - -def test_get_human_readable_permissions(): - b = S3Bucket(name='asdf') - b.AllUsersRead = Permission.ALLOWED - b.AllUsersWrite = Permission.ALLOWED - b.AllUsersReadACP = Permission.ALLOWED - b.AllUsersWriteACP = Permission.ALLOWED - b.AuthUsersRead = Permission.ALLOWED - b.AuthUsersWrite = Permission.ALLOWED - b.AuthUsersReadACP = Permission.ALLOWED - b.AuthUsersWriteACP = Permission.ALLOWED - - b.get_human_readable_permissions() - - b.AllUsersFullControl = Permission.ALLOWED - b.AuthUsersFullControl = Permission.ALLOWED - - b.get_human_readable_permissions() - diff --git a/tests/test_scanner.py b/tests/test_scanner.py deleted file mode 100644 index 664fc99..0000000 --- a/tests/test_scanner.py +++ /dev/null @@ -1,139 +0,0 @@ -import sys -import subprocess -import os -import time -import shutil - -from S3Scanner.S3Service import S3Service - - -def test_arguments(): - s = S3Service() - - a = subprocess.run([sys.executable, '-m', 'S3Scanner', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert a.stdout.decode('utf-8').strip() == '2.0.2' - - b = subprocess.run([sys.executable, '-m', 'S3Scanner', 'scan', '--bucket', 'flaws.cloud'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, 'flaws.cloud | bucket_exists | AuthUsers: [], 
AllUsers: [Read]', b.stdout.decode('utf-8').strip()) - - c = subprocess.run([sys.executable, '-m', 'S3Scanner', 'scan', '--bucket', 'asdfasdf---,'], stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - assert_scanner_output(s, 'asdfasdf---, | bucket_invalid_name', c.stdout.decode('utf-8').strip()) - - d = subprocess.run([sys.executable, '-m', 'S3Scanner', 'scan', '--bucket', 'isurehopethisbucketdoesntexistasdfasdf'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, 'isurehopethisbucketdoesntexistasdfasdf | bucket_not_exist', d.stdout.decode('utf-8').strip()) - - e = subprocess.run([sys.executable, '-m', 'S3Scanner', 'scan', '--bucket', 'flaws.cloud', '--dangerous'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, f"INFO: Including dangerous checks. WARNING: This may change bucket ACL destructively{os.linesep}flaws.cloud | bucket_exists | AuthUsers: [], AllUsers: [Read]", e.stdout.decode('utf-8').strip()) - - f = subprocess.run([sys.executable, '-m', 'S3Scanner', 'dump', '--bucket', 'flaws.cloud', '--dump-dir', './asfasdf'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, "Error: Given --dump-dir does not exist or is not a directory", f.stdout.decode('utf-8').strip()) - - # Create temp folder to dump into - test_folder = os.path.join(os.getcwd(), 'testing_' + str(time.time())[0:10], '') - os.mkdir(test_folder) - - try: - f = subprocess.run([sys.executable, '-m', 'S3Scanner', 'dump', '--bucket', 'flaws.cloud', '--dump-dir', test_folder], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, f"flaws.cloud | Enumerating bucket objects...{os.linesep}flaws.cloud | Total Objects: 7, Total Size: 25.0KB{os.linesep}flaws.cloud | Dumping contents using 4 threads...{os.linesep}flaws.cloud | Dumping completed", f.stdout.decode('utf-8').strip()) - - g = subprocess.run([sys.executable, '-m', 'S3Scanner', 'dump', '--bucket', 'asdfasdf,asdfasd,', '--dump-dir', test_folder], - 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, "asdfasdf,asdfasd, | bucket_name_invalid", g.stdout.decode('utf-8').strip()) - - h = subprocess.run([sys.executable, '-m', 'S3Scanner', 'dump', '--bucket', 'isurehopethisbucketdoesntexistasdfasdf', '--dump-dir', test_folder], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, 'isurehopethisbucketdoesntexistasdfasdf | bucket_not_exist', h.stdout.decode('utf-8').strip()) - finally: - shutil.rmtree(test_folder) # Cleanup the testing folder - - -def test_endpoints(): - """ - Test the handling of non-AWS endpoints - :return: - """ - s = S3Service() - b = subprocess.run([sys.executable, '-m', 'S3Scanner', '--endpoint-url', 'https://sfo2.digitaloceanspaces.com', - 'scan', '--bucket', 's3scanner'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert_scanner_output(s, 's3scanner | bucket_not_exist', - b.stdout.decode('utf-8').strip()) - - c = subprocess.run([sys.executable, '-m', 'S3Scanner', '--endpoint-url', 'http://example.com', 'scan', '--bucket', - 's3scanner'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert c.stdout.decode('utf-8').strip() == "Error: Endpoint 'http://example.com' does not appear to be S3-compliant" - - -def assert_scanner_output(service, expected_output, found_output): - """ - If the tests are run without AWS creds configured, all the output from scanner.py will have a warning banner. - This is a convenience method to simplify comparing the expected output to the found output - - :param service: s3service - :param expected_output: string - :param found_output: string - :return: boolean - """ - creds_warning = "Warning: AWS credentials not configured - functionality will be limited. Run: `aws configure` to fix this." 
- - if service.aws_creds_configured: - assert expected_output == found_output - else: - assert f"{creds_warning}{os.linesep}{os.linesep}{expected_output}" == found_output - - -def test_check_aws_creds(): - """ - Scenario checkAwsCreds.1 - Output of checkAwsCreds() matches a more intense check for creds - """ - print("test_checkAwsCreds temporarily disabled.") - - # test_setup() - # - # # Check more thoroughly for creds being set. - # vars = os.environ - # - # keyid = vars.get("AWS_ACCESS_KEY_ID") - # key = vars.get("AWS_SECRET_ACCESS_KEY") - # credsFile = os.path.expanduser("~") + "/.aws/credentials" - # - # if keyid is not None and len(keyid) == 20: - # if key is not None and len(key) == 40: - # credsActuallyConfigured = True - # else: - # credsActuallyConfigured = False - # else: - # credsActuallyConfigured = False - # - # if os.path.exists(credsFile): - # print("credsFile path exists") - # if not credsActuallyConfigured: - # keyIdSet = None - # keySet = None - # - # # Check the ~/.aws/credentials file - # with open(credsFile, "r") as f: - # for line in f: - # line = line.strip() - # if line[0:17].lower() == 'aws_access_key_id': - # if len(line) >= 38: # key + value = length of at least 38 if no spaces around equals - # keyIdSet = True - # else: - # keyIdSet = False - # - # if line[0:21].lower() == 'aws_secret_access_key': - # if len(line) >= 62: - # keySet = True - # else: - # keySet = False - # - # if keyIdSet and keySet: - # credsActuallyConfigured = True - # - # # checkAwsCreds.1 - # assert s3.checkAwsCreds() == credsActuallyConfigured - diff --git a/tests/test_service.py b/tests/test_service.py deleted file mode 100644 index e51d094..0000000 --- a/tests/test_service.py +++ /dev/null @@ -1,600 +0,0 @@ -import os - -import pytest - -from S3Scanner.S3Service import S3Service -from S3Scanner.S3Bucket import BucketExists, Permission, S3BucketObject, S3Bucket -from TestUtils import TestBucketService -from S3Scanner.exceptions import AccessDeniedException, 
BucketMightNotExistException -from pathlib import Path -from urllib3 import disable_warnings - -testingFolder = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'test/') -setupRan = False - - -""" -S3Service.py methods to test: - -- init() - - βœ”οΈ Test service.aws_creds_configured is false when forceNoCreds = False -- check_bucket_exists() - - βœ”οΈ Test against that exists - - βœ”οΈ Test against one that doesn't -- check_perm_read_acl() - - βœ”οΈ Test against bucket with AllUsers allowed - - βœ”οΈ Test against bucket with AuthUsers allowed - - βœ”οΈ Test against bucket with all denied -- check_perm_read() - - βœ”οΈ Test against bucket with AuthUsers read permission - - βœ”οΈ Test against bucket with AllUsers read permission - - βœ”οΈ Test against bucket with no read permission -- check_perm_write() - - βœ”οΈ Test against bucket with no write permissions - - βœ”οΈ Test against bucket with AuthUsers write permission - - βœ”οΈ Test against bucket with AllUsers write permission - - βœ”οΈ Test against bucket with AllUsers and AuthUsers write permission -- check_perm_write_acl() - - βœ”οΈ Test against bucket with AllUsers allowed - - βœ”οΈ Test against bucket with AuthUsers allowed - - βœ”οΈ Test against bucket with both AllUsers allowed - - βœ”οΈ Test against bucket with no groups allowed -- enumerate_bucket_objects() - - βœ”οΈ Test against empty bucket - - βœ”οΈ Test against not empty bucket with read permission - - βœ”οΈ Test against bucket without read permission -- parse_found_acl() - - βœ”οΈ Test against JSON with FULL_CONTROL for AllUsers - - βœ”οΈ Test against JSON with FULL_CONTROL for AuthUsers - - βœ”οΈ Test against empty JSON - - βœ”οΈ Test against JSON with ReadACP for AuthUsers and Write for AllUsers -""" - - -def test_setup_new(): - global setupRan - if setupRan: # We only need to run this once per test-run - return - - # Create testingFolder if it doesn't exist - if not os.path.exists(testingFolder) or 
not os.path.isdir(testingFolder): - os.makedirs(testingFolder) - setupRan = True - - -def test_init(): - test_setup_new() - - s = S3Service(forceNoCreds=True) - assert s.aws_creds_configured is False - - -def test_bucket_exists(): - test_setup_new() - - s = S3Service() - - # Bucket that does exist - b1 = S3Bucket('s3scanner-private') - s.check_bucket_exists(b1) - assert b1.exists is BucketExists.YES - - # Bucket that doesn't exist (hopefully) - b2 = S3Bucket('asfasfasdfasdfasdf') - s.check_bucket_exists(b2) - assert b2.exists is BucketExists.NO - - # Pass a thing that's not a bucket - with pytest.raises(ValueError): - s.check_bucket_exists("asdfasdf") - - -def test_check_perm_read(): - test_setup_new() - - s = S3Service() - - # Bucket that no one can list - b1 = S3Bucket('s3scanner-private') - b1.exists = BucketExists.YES - s.check_perm_read(b1) - if s.aws_creds_configured: - assert b1.AuthUsersRead == Permission.DENIED - else: - assert b1.AllUsersRead == Permission.DENIED - - # Bucket that only AuthenticatedUsers can list - b2 = S3Bucket('s3scanner-auth-read') - b2.exists = BucketExists.YES - s.check_perm_read(b2) - if s.aws_creds_configured: - assert b2.AuthUsersRead == Permission.ALLOWED - else: - assert b2.AllUsersRead == Permission.DENIED - - # Bucket that Everyone can list - b3 = S3Bucket('s3scanner-long') - b3.exists = BucketExists.YES - s.check_perm_read(b3) - if s.aws_creds_configured: - assert b3.AuthUsersRead == Permission.ALLOWED - else: - assert b3.AllUsersRead == Permission.ALLOWED - - -def test_enumerate_bucket_objects(): - test_setup_new() - - s = S3Service() - - # Empty bucket - b1 = S3Bucket('s3scanner-empty') - b1.exists = BucketExists.YES - s.check_perm_read(b1) - if s.aws_creds_configured: - assert b1.AuthUsersRead == Permission.ALLOWED - else: - assert b1.AllUsersRead == Permission.ALLOWED - s.enumerate_bucket_objects(b1) - assert b1.objects_enumerated is True - assert b1.bucketSize == 0 - - # Bucket with > 1000 items - if 
s.aws_creds_configured: - b2 = S3Bucket('s3scanner-auth-read') - b2.exists = BucketExists.YES - s.check_perm_read(b2) - assert b2.AuthUsersRead == Permission.ALLOWED - s.enumerate_bucket_objects(b2) - assert b2.objects_enumerated is True - assert b2.bucketSize == 4143 - assert b2.get_human_readable_size() == "4.0KB" - else: - print("[test_enumerate_bucket_objects] Skipping test due to no AWS creds") - - # Bucket without read permission - b3 = S3Bucket('s3scanner-private') - b3.exists = BucketExists.YES - s.check_perm_read(b3) - if s.aws_creds_configured: - assert b3.AuthUsersRead == Permission.DENIED - else: - assert b3.AllUsersRead == Permission.DENIED - try: - s.enumerate_bucket_objects(b3) - except AccessDeniedException: - pass - - # Try to enumerate before checking if bucket exists - b4 = S3Bucket('s3scanner-enumerate-bucket') - with pytest.raises(Exception): - s.enumerate_bucket_objects(b4) - - -def test_check_perm_read_acl(): - test_setup_new() - s = S3Service() - - # Bucket with no read ACL perms - b1 = S3Bucket('s3scanner-private') - b1.exists = BucketExists.YES - s.check_perm_read_acl(b1) - if s.aws_creds_configured: - assert b1.AuthUsersReadACP == Permission.DENIED - else: - assert b1.AllUsersReadACP == Permission.DENIED - - # Bucket that allows AuthenticatedUsers to read ACL - if s.aws_creds_configured: - b2 = S3Bucket('s3scanner-auth-read-acl') - b2.exists = BucketExists.YES - s.check_perm_read_acl(b2) - if s.aws_creds_configured: - assert b2.AuthUsersReadACP == Permission.ALLOWED - else: - assert b2.AllUsersReadACP == Permission.DENIED - - # Bucket that allows AllUsers to read ACL - b3 = S3Bucket('s3scanner-all-readacp') - b3.exists = BucketExists.YES - s.check_perm_read_acl(b3) - assert b3.AllUsersReadACP == Permission.ALLOWED - assert b3.AllUsersWrite == Permission.DENIED - assert b3.AllUsersWriteACP == Permission.DENIED - assert b3.AuthUsersReadACP == Permission.DENIED - assert b3.AuthUsersWriteACP == Permission.DENIED - assert b3.AuthUsersWrite == 
Permission.DENIED - - -def test_check_perm_write(do_dangerous_test): - test_setup_new() - s = S3Service() - sAnon = S3Service(forceNoCreds=True) - - # Bucket with no write perms - b1 = S3Bucket('flaws.cloud') - b1.exists = BucketExists.YES - s.check_perm_write(b1) - - if s.aws_creds_configured: - assert b1.AuthUsersWrite == Permission.DENIED - else: - assert b1.AllUsersWrite == Permission.DENIED - - if do_dangerous_test: - print("[test_check_perm_write] Doing dangerous test") - ts = TestBucketService() - - danger_bucket_1 = ts.create_bucket(1) # Bucket with AuthUser Write, WriteACP permissions - try: - b2 = S3Bucket(danger_bucket_1) - b2.exists = BucketExists.YES - sAnon.check_perm_write(b2) - s.check_perm_write(b2) - assert b2.AuthUsersWrite == Permission.ALLOWED - assert b2.AllUsersWrite == Permission.DENIED - finally: - ts.delete_bucket(danger_bucket_1) - - danger_bucket_2 = ts.create_bucket(2) # Bucket with AllUser Write, WriteACP permissions - try: - b3 = S3Bucket(danger_bucket_2) - b3.exists = BucketExists.YES - sAnon.check_perm_write(b3) - s.check_perm_write(b3) - assert b3.AllUsersWrite == Permission.ALLOWED - assert b3.AuthUsersWrite == Permission.UNKNOWN - finally: - ts.delete_bucket(danger_bucket_2) - - # Bucket with AllUsers and AuthUser Write permissions - danger_bucket_4 = ts.create_bucket(4) - try: - b4 = S3Bucket(danger_bucket_4) - b4.exists = BucketExists.YES - sAnon.check_perm_write(b4) - s.check_perm_write(b4) - assert b4.AllUsersWrite == Permission.ALLOWED - assert b4.AuthUsersWrite == Permission.UNKNOWN - finally: - ts.delete_bucket(danger_bucket_4) - else: - print("[test_check_perm_write] Skipping dangerous test") - - -def test_check_perm_write_acl(do_dangerous_test): - test_setup_new() - s = S3Service() - sNoCreds = S3Service(forceNoCreds=True) - - # Bucket with no permissions - b1 = S3Bucket('s3scanner-private') - b1.exists = BucketExists.YES - s.check_perm_write_acl(b1) - if s.aws_creds_configured: - assert b1.AuthUsersWriteACP == 
Permission.DENIED - assert b1.AllUsersWriteACP == Permission.UNKNOWN - else: - assert b1.AllUsersWriteACP == Permission.DENIED - assert b1.AuthUsersWriteACP == Permission.UNKNOWN - - if do_dangerous_test: - print("[test_check_perm_write_acl] Doing dangerous tests...") - ts = TestBucketService() - - # Bucket with WRITE_ACP enabled for AuthUsers - danger_bucket_3 = ts.create_bucket(3) - try: - b2 = S3Bucket(danger_bucket_3) - b2.exists = BucketExists.YES - - # Check for read/write permissions so when we check for write_acl we - # send the same perms that it had originally - sNoCreds.check_perm_read(b2) - s.check_perm_read(b2) - sNoCreds.check_perm_write(b2) - s.check_perm_write(b2) - - # Check for WriteACP - sNoCreds.check_perm_write_acl(b2) - s.check_perm_write_acl(b2) - - # Grab permissions after our check so we can compare to original - sNoCreds.check_perm_write(b2) - s.check_perm_write(b2) - sNoCreds.check_perm_read(b2) - s.check_perm_read(b2) - if s.aws_creds_configured: - assert b2.AuthUsersWriteACP == Permission.ALLOWED - - # Make sure we didn't change the original permissions - assert b2.AuthUsersWrite == Permission.ALLOWED - assert b2.AllUsersWrite == Permission.DENIED - assert b2.AllUsersRead == Permission.ALLOWED - assert b2.AuthUsersRead == Permission.UNKNOWN - else: - assert b2.AllUsersRead == Permission.ALLOWED - assert b2.AuthUsersWriteACP == Permission.UNKNOWN - except Exception as e: - raise e - finally: - ts.delete_bucket(danger_bucket_3) - - # Bucket with WRITE_ACP enabled for AllUsers - danger_bucket_2 = ts.create_bucket(2) - try: - b3 = S3Bucket(danger_bucket_2) - b3.exists = BucketExists.YES - sNoCreds.check_perm_read(b3) - s.check_perm_read(b3) - sNoCreds.check_perm_write(b3) - s.check_perm_write(b3) - sNoCreds.check_perm_write_acl(b3) - s.check_perm_write_acl(b3) - sNoCreds.check_perm_write(b3) - s.check_perm_write(b3) - sNoCreds.check_perm_read(b3) - s.check_perm_read(b3) - if s.aws_creds_configured: - assert b3.AllUsersWriteACP == 
Permission.ALLOWED - assert b3.AuthUsersWriteACP == Permission.UNKNOWN - assert b3.AllUsersWrite == Permission.ALLOWED - else: - assert b3.AllUsersRead == Permission.ALLOWED - assert b3.AuthUsersWriteACP == Permission.UNKNOWN - except Exception as e: - raise e - finally: - ts.delete_bucket(danger_bucket_2) - - # Bucket with WRITE_ACP enabled for both AllUsers and AuthUsers - danger_bucket_5 = ts.create_bucket(5) - try: - b5 = S3Bucket(danger_bucket_5) - b5.exists = BucketExists.YES - sNoCreds.check_perm_read(b5) - s.check_perm_read(b5) - sNoCreds.check_perm_write(b5) - s.check_perm_write(b5) - sNoCreds.check_perm_write_acl(b5) - s.check_perm_write_acl(b5) - sNoCreds.check_perm_write(b5) - s.check_perm_write(b5) - sNoCreds.check_perm_read(b5) - s.check_perm_read(b5) - assert b5.AllUsersWriteACP == Permission.ALLOWED - assert b5.AuthUsersWriteACP == Permission.UNKNOWN - assert b5.AllUsersWrite == Permission.DENIED - assert b5.AuthUsersWrite == Permission.DENIED - except Exception as e: - raise e - finally: - ts.delete_bucket(danger_bucket_5) - else: - print("[test_check_perm_write_acl] Skipping dangerous test...") - - -def test_parse_found_acl(): - test_setup_new() - sAnon = S3Service(forceNoCreds=True) - - b1 = S3Bucket('s3scanner-all-read-readacl') - b1.exists = BucketExists.YES - sAnon.check_perm_read_acl(b1) - - assert b1.foundACL is not None - assert b1.AllUsersRead == Permission.ALLOWED - assert b1.AllUsersReadACP == Permission.ALLOWED - assert b1.AllUsersWrite == Permission.DENIED - assert b1.AllUsersWriteACP == Permission.DENIED - assert b1.AllUsersFullControl == Permission.DENIED - - assert b1.AuthUsersReadACP == Permission.DENIED - assert b1.AuthUsersRead == Permission.DENIED - assert b1.AuthUsersWrite == Permission.DENIED - assert b1.AuthUsersWriteACP == Permission.DENIED - assert b1.AuthUsersFullControl == Permission.DENIED - - test_acls_1 = { - 'Grants': [ - { - 'Grantee': { - 'Type': 'Group', - 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers' - 
}, - 'Permission': 'FULL_CONTROL' - } - ] - } - - b2 = S3Bucket('test-acl-doesnt-exist') - b2.exists = BucketExists.YES - b2.foundACL = test_acls_1 - sAnon.parse_found_acl(b2) - assert b2.AllUsersRead == Permission.ALLOWED - assert b2.AllUsersReadACP == Permission.ALLOWED - assert b2.AllUsersWrite == Permission.ALLOWED - assert b2.AllUsersWriteACP == Permission.ALLOWED - assert b2.AllUsersFullControl == Permission.ALLOWED - assert b2.AuthUsersRead == Permission.DENIED - assert b2.AuthUsersReadACP == Permission.DENIED - assert b2.AuthUsersWrite == Permission.DENIED - assert b2.AuthUsersWriteACP == Permission.DENIED - assert b2.AuthUsersFullControl == Permission.DENIED - - test_acls_2 = { - 'Grants': [ - { - 'Grantee': { - 'Type': 'Group', - 'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers' - }, - 'Permission': 'FULL_CONTROL' - } - ] - } - - b3 = S3Bucket('test-acl2-doesnt-exist') - b3.exists = BucketExists.YES - b3.foundACL = test_acls_2 - sAnon.parse_found_acl(b3) - assert b3.AllUsersRead == Permission.DENIED - assert b3.AllUsersReadACP == Permission.DENIED - assert b3.AllUsersWrite == Permission.DENIED - assert b3.AllUsersWriteACP == Permission.DENIED - assert b3.AllUsersFullControl == Permission.DENIED - assert b3.AuthUsersRead == Permission.ALLOWED - assert b3.AuthUsersReadACP == Permission.ALLOWED - assert b3.AuthUsersWrite == Permission.ALLOWED - assert b3.AuthUsersWriteACP == Permission.ALLOWED - assert b3.AuthUsersFullControl == Permission.ALLOWED - - test_acls_3 = { - 'Grants': [ - { - 'Grantee': { - 'Type': 'Group', - 'URI': 'asdfasdf' - }, - 'Permission': 'READ' - } - ] - } - - b4 = S3Bucket('test-acl3-doesnt-exist') - b4.exists = BucketExists.YES - b4.foundACL = test_acls_3 - sAnon.parse_found_acl(b4) - - all_permissions = [b4.AllUsersRead, b4.AllUsersReadACP, b4.AllUsersWrite, b4.AllUsersWriteACP, - b4.AllUsersFullControl, b4.AuthUsersRead, b4.AuthUsersReadACP, b4.AuthUsersWrite, - b4.AuthUsersWriteACP, b4.AuthUsersFullControl] - - for 
p in all_permissions: - assert p == Permission.DENIED - - test_acls_4 = { - 'Grants': [ - { - 'Grantee': { - 'Type': 'Group', - 'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers' - }, - 'Permission': 'READ_ACP' - }, - { - 'Grantee': { - 'Type': 'Group', - 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers' - }, - 'Permission': 'READ_ACP' - } - ] - } - - b5 = S3Bucket('test-acl4-doesnt-exist') - b5.exists = BucketExists.YES - b5.foundACL = test_acls_4 - sAnon.parse_found_acl(b5) - assert b5.AllUsersRead == Permission.DENIED - assert b5.AllUsersReadACP == Permission.ALLOWED - assert b5.AllUsersWrite == Permission.DENIED - assert b5.AllUsersWriteACP == Permission.DENIED - assert b5.AllUsersFullControl == Permission.DENIED - assert b5.AuthUsersRead == Permission.DENIED - assert b5.AuthUsersReadACP == Permission.ALLOWED - assert b5.AuthUsersWrite == Permission.DENIED - assert b5.AuthUsersWriteACP == Permission.DENIED - assert b5.AuthUsersFullControl == Permission.DENIED - - -def test_check_perms_without_checking_bucket_exists(): - test_setup_new() - sAnon = S3Service(forceNoCreds=True) - - b1 = S3Bucket('blahblah') - with pytest.raises(BucketMightNotExistException): - sAnon.check_perm_read_acl(b1) - - with pytest.raises(BucketMightNotExistException): - sAnon.check_perm_read(b1) - - with pytest.raises(BucketMightNotExistException): - sAnon.check_perm_write(b1) - - with pytest.raises(BucketMightNotExistException): - sAnon.check_perm_write_acl(b1) - - -def test_no_ssl(): - test_setup_new() - S3Service(verify_ssl=False) - - -def test_download_file(): - test_setup_new() - s = S3Service() - - # Try to download a file that already exists - dest_folder = os.path.realpath(testingFolder) - Path(os.path.join(dest_folder, 'test_download_file.txt')).touch() - size = Path(os.path.join(dest_folder, 'test_download_file.txt')).stat().st_size - - o = S3BucketObject(size=size, last_modified="2020-12-31_03-02-11z", key="test_download_file.txt") - - b = 
S3Bucket("bucket-no-existo") - s.download_file(os.path.join(dest_folder, ''), b, True, o) - -def test_is_safe_file_to_download(): - test_setup_new() - s = S3Service() - - # Check a good file name - assert s.is_safe_file_to_download("file.txt", "./bucket_dir/") == True - assert s.is_safe_file_to_download("file.txt", "./bucket_dir") == True - - # Check file with relative name - assert s.is_safe_file_to_download("../file.txt", "./buckets/") == False - assert s.is_safe_file_to_download("../", "./buckets/") == False - assert s.is_safe_file_to_download("/file.txt", "./buckets/") == False - - -def test_validate_endpoint_url_nonaws(): - disable_warnings() - s = S3Service() - - # Test CenturyLink_Lumen - s.endpoint_url = 'https://useast.os.ctl.io' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test DigitalOcean - s.endpoint_url = 'https://sfo2.digitaloceanspaces.com' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test Dreamhost - s.endpoint_url = 'https://objects.dreamhost.com' - assert s.validate_endpoint_url(use_ssl=False, verify_ssl=False, endpoint_address_style='vhost') is True - - # Test GCP - s.endpoint_url = 'https://storage.googleapis.com' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test IBM - s.endpoint_url = 'https://s3.us-east.cloud-object-storage.appdomain.cloud' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test Linode - s.endpoint_url = 'https://eu-central-1.linodeobjects.com' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test Scaleway - s.endpoint_url = 'https://s3.nl-ams.scw.cloud' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test Vultr - s.endpoint_url = 'https://ewr1.vultrobjects.com' 
- assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True - - # Test Wasabi - s.endpoint_url = 'https://s3.wasabisys.com' - assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True